diff --git a/LICENSE b/LICENSE index 261eeb9..f4bbcd2 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,373 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Mozilla Public License Version 2.0 +================================== - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions +-------------- - 1. Definitions. +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +1.3. "Contribution" + means Covered Software of a particular Contributor. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +1.5. 
"Incompatible With Secondary Licenses" + means - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +1.6. "Executable Form" + means any form of the work other than Source Code Form. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. 
- "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +1.8. "License" + means this document. - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +1.10. "Modifications" + means any of the following: - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: + (b) any new file in Source Code Form that contains any Covered + Software. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. 
- (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +1.13. "Source Code Form" + means the form of the work preferred for making modifications. - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +2. License Grants and Conditions +-------------------------------- - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +2.1. Grants - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. 
You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. 
However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +2.2. Effective Date - END OF TERMS AND CONDITIONS +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. - APPENDIX: How to apply the Apache License to your work. +2.3. Limitations on Grant Scope - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: - Copyright [yyyy] [name of copyright owner] +(a) for any code that a Contributor has removed from Covered Software; + or - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - http://www.apache.org/licenses/LICENSE-2.0 +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. 
You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/Cargo.lock b/gstreamer/Cargo.lock similarity index 100% rename from Cargo.lock rename to gstreamer/Cargo.lock diff --git a/Cargo.toml b/gstreamer/Cargo.toml similarity index 70% rename from Cargo.toml rename to gstreamer/Cargo.toml index e8a6a6d..a3248dd 100644 --- a/Cargo.toml +++ b/gstreamer/Cargo.toml @@ -12,6 +12,22 @@ rust-version = "1.70" url = "2" gst = { package = "gstreamer", git = "https://gitlab.freedesktop.org/gstreamer/gstreamer-rs" } gst-base = { package = "gstreamer-base", git = "https://gitlab.freedesktop.org/gstreamer/gstreamer-rs" } +gst-video.workspace = true +wayland-display-core = { path = "../wayland-display-core" } +moq-transport = { path = "../moq-transport" } +tracing.workspace = true +once_cell.workspace = true +tracing-subscriber = "0.3.16" + +# QUIC +quinn = "0.10" +webtransport-quinn = "0.6.1" +url = "2" + +# Crypto +rustls = { version = "0.21", features = ["dangerous_configuration"] } +rustls-native-certs = "0.6" +rustls-pemfile = "1" [lib] name = "gstwarp" diff --git a/build.rs b/gstreamer/build.rs similarity index 100% rename from build.rs rename to gstreamer/build.rs diff --git a/gstreamer/src/README.md b/gstreamer/src/README.md new file mode 100644 index 0000000..5be79e1 --- /dev/null +++ b/gstreamer/src/README.md @@ -0,0 +1,51 @@ +To be used as 
follows: + +```rust +use gst::prelude::*; + +fn main() { + // Initialize GStreamer + gst::init().unwrap(); + + // Create the elements + let source = gst::ElementFactory::make("videotestsrc", Some("source")).unwrap(); + let encoder = gst::ElementFactory::make("x264enc", Some("encoder")).unwrap(); + let muxer = gst::ElementFactory::make("fmp4mux", Some("muxer")).unwrap(); + let sink = gst::ElementFactory::make("warpsink", Some("sink")).unwrap(); + + // Create an empty pipeline + let pipeline = gst::Pipeline::new(Some("pipeline")); + + // Build the pipeline + pipeline.add_many(&[&source, &encoder, &muxer, &sink]).unwrap(); + gst::Element::link_many(&[&source, &encoder, &muxer, &sink]).unwrap(); + + // Start playing + pipeline.set_state(gst::State::Playing).unwrap(); + + // Wait until error or EOS + let bus = pipeline.bus().unwrap(); + for msg in bus.iter_timed(gst::CLOCK_TIME_NONE) { + use gst::MessageView; + + match msg.view() { + MessageView::Error(err) => { + eprintln!( + "Error from {:?}: {} ({:?})", + err.src().map(|s| s.path_string()), + err.error(), + err.debug() + ); + break; + } + MessageView::Eos(..) => break, + _ => (), + } + } + + // Clean up + pipeline.set_state(gst::State::Null).unwrap(); +} +``` + + diff --git a/gstreamer/src/fmp4mux/LICENSE.md b/gstreamer/src/fmp4mux/LICENSE.md new file mode 100644 index 0000000..f4bbcd2 --- /dev/null +++ b/gstreamer/src/fmp4mux/LICENSE.md @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/gstreamer/src/fmp4mux/README.md b/gstreamer/src/fmp4mux/README.md new file mode 100644 index 0000000..6e68fce --- /dev/null +++ b/gstreamer/src/fmp4mux/README.md @@ -0,0 +1 @@ +https://github.com/sdroege/gst-plugin-rs/tree/main/mux/fmp4 \ No newline at end of file diff --git a/gstreamer/src/fmp4mux/boxes.rs b/gstreamer/src/fmp4mux/boxes.rs new file mode 100644 index 0000000..a6165ff --- /dev/null +++ b/gstreamer/src/fmp4mux/boxes.rs @@ -0,0 +1,2258 @@ +// Copyright (C) 2021 Sebastian Dröge +// +// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0. +// If a copy of the MPL was not distributed with this file, You can obtain one at +// . +// +// SPDX-License-Identifier: MPL-2.0 + +use gst::prelude::*; + +use anyhow::{anyhow, bail, Context, Error}; + +use super::Buffer; + +fn write_box) -> Result>( + vec: &mut Vec, + fourcc: impl std::borrow::Borrow<[u8; 4]>, + content_func: F, +) -> Result { + // Write zero size ... + let size_pos = vec.len(); + vec.extend([0u8; 4]); + vec.extend(fourcc.borrow()); + + let res = content_func(vec)?; + + // ... and update it here later. 
+ let size: u32 = vec + .len() + .checked_sub(size_pos) + .expect("vector shrunk") + .try_into() + .context("too big box content")?; + vec[size_pos..][..4].copy_from_slice(&size.to_be_bytes()); + + Ok(res) +} + +const FULL_BOX_VERSION_0: u8 = 0; +const FULL_BOX_VERSION_1: u8 = 1; + +const FULL_BOX_FLAGS_NONE: u32 = 0; + +fn write_full_box) -> Result>( + vec: &mut Vec, + fourcc: impl std::borrow::Borrow<[u8; 4]>, + version: u8, + flags: u32, + content_func: F, +) -> Result { + write_box(vec, fourcc, move |vec| { + assert_eq!(flags >> 24, 0); + vec.extend(((u32::from(version) << 24) | flags).to_be_bytes()); + content_func(vec) + }) +} + +fn cmaf_brands_from_caps(caps: &gst::CapsRef, compatible_brands: &mut Vec<&'static [u8; 4]>) { + let s = caps.structure(0).unwrap(); + match s.name().as_str() { + "video/x-h264" => { + let width = s.get::("width").ok(); + let height = s.get::("height").ok(); + let fps = s.get::("framerate").ok(); + let profile = s.get::<&str>("profile").ok(); + let level = s + .get::<&str>("level") + .ok() + .map(|l| l.split_once('.').unwrap_or((l, "0"))); + let colorimetry = s.get::<&str>("colorimetry").ok(); + + if let (Some(width), Some(height), Some(profile), Some(level), Some(fps)) = + (width, height, profile, level, fps) + { + if profile == "high" + || profile == "main" + || profile == "baseline" + || profile == "constrained-baseline" + { + if width <= 864 + && height <= 576 + && level <= ("3", "1") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + | gst_video::VideoColorPrimaries::Bt470bg + | gst_video::VideoColorPrimaries::Smpte170m + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + | gst_video::VideoTransferFunction::Bt601 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + | gst_video::VideoColorMatrix::Bt601 + ) { + 
compatible_brands.push(b"cfsd"); + } + } else { + // Assume it's OK + compatible_brands.push(b"cfsd"); + } + } else if width <= 1920 + && height <= 1080 + && level <= ("4", "0") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + ) { + compatible_brands.push(b"cfhd"); + } + } else { + // Assume it's OK + compatible_brands.push(b"cfhd"); + } + } else if width <= 1920 + && height <= 1080 + && level <= ("4", "2") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + ) { + compatible_brands.push(b"chdf"); + } + } else { + // Assume it's OK + compatible_brands.push(b"chdf"); + } + } + } + } + } + "audio/mpeg" => { + compatible_brands.push(b"caac"); + } + "video/x-h265" => { + let width = s.get::("width").ok(); + let height = s.get::("height").ok(); + let fps = s.get::("framerate").ok(); + let profile = s.get::<&str>("profile").ok(); + let tier = s.get::<&str>("tier").ok(); + let level = s + .get::<&str>("level") + .ok() + .map(|l| l.split_once('.').unwrap_or((l, "0"))); + let colorimetry = s.get::<&str>("colorimetry").ok(); + + if let (Some(width), Some(height), Some(profile), Some(tier), Some(level), Some(fps)) = + (width, height, profile, tier, level, fps) + { + if profile == "main" && tier == "main" { + if width <= 1920 + && height <= 1080 + && level <= ("4", "1") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + 
colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + ) { + compatible_brands.push(b"chhd"); + } + } else { + // Assume it's OK + compatible_brands.push(b"chhd"); + } + } else if width <= 3840 + && height <= 2160 + && level <= ("5", "0") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + ) { + compatible_brands.push(b"cud8"); + } + } else { + // Assume it's OK + compatible_brands.push(b"cud8"); + } + } + } else if profile == "main-10" && tier == "main-10" { + if width <= 1920 + && height <= 1080 + && level <= ("4", "1") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + ) { + compatible_brands.push(b"chh1"); + } + } else { + // Assume it's OK + compatible_brands.push(b"chh1"); + } + } else if width <= 3840 + && height <= 2160 + && level <= ("5", "1") + && fps <= gst::Fraction::new(60, 1) + { + if let Some(colorimetry) = + colorimetry.and_then(|c| c.parse::().ok()) + { + if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt709 + | gst_video::VideoColorPrimaries::Bt2020 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Bt709 + | 
gst_video::VideoTransferFunction::Bt202010 + | gst_video::VideoTransferFunction::Bt202012 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt709 + | gst_video::VideoColorMatrix::Bt2020 + ) { + compatible_brands.push(b"cud1"); + } else if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt2020 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::Smpte2084 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt2020 + ) { + compatible_brands.push(b"chd1"); + } else if matches!( + colorimetry.primaries(), + gst_video::VideoColorPrimaries::Bt2020 + ) && matches!( + colorimetry.transfer(), + gst_video::VideoTransferFunction::AribStdB67 + ) && matches!( + colorimetry.matrix(), + gst_video::VideoColorMatrix::Bt2020 + ) { + compatible_brands.push(b"clg1"); + } + } else { + // Assume it's OK + compatible_brands.push(b"cud1"); + } + } + } + } + } + _ => (), + } +} + +fn brands_from_variant_and_caps<'a>( + variant: super::Variant, + mut caps: impl Iterator, +) -> (&'static [u8; 4], Vec<&'static [u8; 4]>) { + match variant { + super::Variant::ISO | super::Variant::ONVIF => (b"iso6", vec![b"iso6"]), + super::Variant::DASH => { + // FIXME: `dsms` / `dash` brands, `msix` + (b"msdh", vec![b"dums", b"msdh", b"iso6"]) + } + super::Variant::CMAF => { + let mut compatible_brands = vec![b"iso6", b"cmfc"]; + + cmaf_brands_from_caps(caps.next().unwrap(), &mut compatible_brands); + assert_eq!(caps.next(), None); + + (b"cmf2", compatible_brands) + } + } +} + +/// Creates `ftyp` and `moov` boxes +pub(super) fn create_fmp4_header(cfg: super::HeaderConfiguration) -> Result { + let mut v = vec![]; + + let (brand, compatible_brands) = + brands_from_variant_and_caps(cfg.variant, cfg.streams.iter().map(|s| &s.caps)); + + write_box(&mut v, b"ftyp", |v| { + // major brand + v.extend(brand); + // minor version + v.extend(0u32.to_be_bytes()); + // compatible brands + 
v.extend(compatible_brands.into_iter().flatten()); + + Ok(()) + })?; + + write_box(&mut v, b"moov", |v| write_moov(v, &cfg))?; + + if cfg.variant == super::Variant::ONVIF { + write_full_box( + &mut v, + b"meta", + FULL_BOX_VERSION_0, + FULL_BOX_FLAGS_NONE, + |v| { + write_full_box(v, b"hdlr", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + // Handler type + v.extend(b"null"); + + // Reserved + v.extend([0u8; 3 * 4]); + + // Name + v.extend(b"MetadataHandler"); + + Ok(()) + })?; + + write_box(v, b"cstb", |v| { + // entry count + v.extend(1u32.to_be_bytes()); + + // track id + v.extend(0u32.to_be_bytes()); + + // start UTC time in 100ns units since Jan 1 1601 + v.extend(cfg.start_utc_time.unwrap().to_be_bytes()); + + Ok(()) + }) + }, + )?; + } + + Ok(gst::Buffer::from_mut_slice(v)) +} + +fn write_moov(v: &mut Vec, cfg: &super::HeaderConfiguration) -> Result<(), Error> { + use gst::glib; + + let base = glib::DateTime::from_utc(1904, 1, 1, 0, 0, 0.0)?; + let now = glib::DateTime::now_utc()?; + let creation_time = + u64::try_from(now.difference(&base).as_seconds()).expect("time before 1904"); + + write_full_box(v, b"mvhd", FULL_BOX_VERSION_1, FULL_BOX_FLAGS_NONE, |v| { + write_mvhd(v, cfg, creation_time) + })?; + for (idx, stream) in cfg.streams.iter().enumerate() { + write_box(v, b"trak", |v| { + let mut references = vec![]; + + // Reference the video track for ONVIF metadata tracks + if cfg.variant == super::Variant::ONVIF + && stream.caps.structure(0).unwrap().name() == "application/x-onvif-metadata" + { + // Find the first video track + for (idx, other_stream) in cfg.streams.iter().enumerate() { + let s = other_stream.caps.structure(0).unwrap(); + + if matches!( + s.name().as_str(), + "video/x-h264" | "video/x-h265" | "image/jpeg" + ) { + references.push(TrackReference { + reference_type: *b"cdsc", + track_ids: vec![idx as u32 + 1], + }); + break; + } + } + } + + write_trak(v, cfg, idx, stream, creation_time, &references) + })?; + } + write_box(v, b"mvex", |v| 
write_mvex(v, cfg))?; + + Ok(()) +} + +fn caps_to_timescale(caps: &gst::CapsRef) -> u32 { + let s = caps.structure(0).unwrap(); + + if let Ok(fps) = s.get::("framerate") { + if fps.numer() == 0 { + return 10_000; + } + + if fps.denom() != 1 && fps.denom() != 1001 { + if let Some(fps) = (fps.denom() as u64) + .nseconds() + .mul_div_round(1_000_000_000, fps.numer() as u64) + .and_then(gst_video::guess_framerate) + { + return (fps.numer() as u32) + .mul_div_round(100, fps.denom() as u32) + .unwrap_or(10_000); + } + } + + if fps.denom() == 1001 { + fps.numer() as u32 + } else { + (fps.numer() as u32) + .mul_div_round(100, fps.denom() as u32) + .unwrap_or(10_000) + } + } else if let Ok(rate) = s.get::("rate") { + rate as u32 + } else { + 10_000 + } +} + +fn header_stream_to_timescale(stream: &super::HeaderStream) -> u32 { + if stream.trak_timescale > 0 { + stream.trak_timescale + } else { + caps_to_timescale(&stream.caps) + } +} + +fn header_configuration_to_timescale(cfg: &super::HeaderConfiguration) -> u32 { + if cfg.movie_timescale > 0 { + cfg.movie_timescale + } else { + // Use the reference track timescale + header_stream_to_timescale(&cfg.streams[0]) + } +} + +fn fragment_header_stream_to_timescale(stream: &super::FragmentHeaderStream) -> u32 { + if stream.trak_timescale > 0 { + stream.trak_timescale + } else { + caps_to_timescale(&stream.caps) + } +} + +fn write_mvhd( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + creation_time: u64, +) -> Result<(), Error> { + // Creation time + v.extend(creation_time.to_be_bytes()); + // Modification time + v.extend(creation_time.to_be_bytes()); + // Timescale + v.extend(header_configuration_to_timescale(cfg).to_be_bytes()); + // Duration + v.extend(0u64.to_be_bytes()); + + // Rate 1.0 + v.extend((1u32 << 16).to_be_bytes()); + // Volume 1.0 + v.extend((1u16 << 8).to_be_bytes()); + // Reserved + v.extend([0u8; 2 + 2 * 4]); + + // Matrix + v.extend( + [ + (1u32 << 16).to_be_bytes(), + 0u32.to_be_bytes(), + 
0u32.to_be_bytes(), + 0u32.to_be_bytes(), + (1u32 << 16).to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + (16384u32 << 16).to_be_bytes(), + ] + .into_iter() + .flatten(), + ); + + // Pre defined + v.extend([0u8; 6 * 4]); + + // Next track id + v.extend((cfg.streams.len() as u32 + 1).to_be_bytes()); + + Ok(()) +} + +const TKHD_FLAGS_TRACK_ENABLED: u32 = 0x1; +const TKHD_FLAGS_TRACK_IN_MOVIE: u32 = 0x2; +const TKHD_FLAGS_TRACK_IN_PREVIEW: u32 = 0x4; + +struct TrackReference { + reference_type: [u8; 4], + track_ids: Vec, +} + +fn write_trak( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + idx: usize, + stream: &super::HeaderStream, + creation_time: u64, + references: &[TrackReference], +) -> Result<(), Error> { + write_full_box( + v, + b"tkhd", + FULL_BOX_VERSION_1, + TKHD_FLAGS_TRACK_ENABLED | TKHD_FLAGS_TRACK_IN_MOVIE | TKHD_FLAGS_TRACK_IN_PREVIEW, + |v| write_tkhd(v, cfg, idx, stream, creation_time), + )?; + + // TODO: write edts if necessary: for audio tracks to remove initialization samples + // TODO: write edts optionally for negative DTS instead of offsetting the DTS + + write_box(v, b"mdia", |v| write_mdia(v, cfg, stream, creation_time))?; + + if !references.is_empty() { + write_box(v, b"tref", |v| write_tref(v, cfg, references))?; + } + + Ok(()) +} + +fn write_tkhd( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + idx: usize, + stream: &super::HeaderStream, + creation_time: u64, +) -> Result<(), Error> { + // Creation time + v.extend(creation_time.to_be_bytes()); + // Modification time + v.extend(creation_time.to_be_bytes()); + // Track ID + v.extend((idx as u32 + 1).to_be_bytes()); + // Reserved + v.extend(0u32.to_be_bytes()); + // Duration + v.extend(0u64.to_be_bytes()); + + // Reserved + v.extend([0u8; 2 * 4]); + + // Layer + v.extend(0u16.to_be_bytes()); + // Alternate group + v.extend(0u16.to_be_bytes()); + + // Volume + let s = stream.caps.structure(0).unwrap(); + match s.name().as_str() { + "audio/mpeg" | 
"audio/x-opus" | "audio/x-alaw" | "audio/x-mulaw" | "audio/x-adpcm" => { + v.extend((1u16 << 8).to_be_bytes()) + } + _ => v.extend(0u16.to_be_bytes()), + } + + // Reserved + v.extend([0u8; 2]); + + // Matrix + v.extend( + [ + (1u32 << 16).to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + (1u32 << 16).to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + 0u32.to_be_bytes(), + (16384u32 << 16).to_be_bytes(), + ] + .into_iter() + .flatten(), + ); + + // Width/height + match s.name().as_str() { + "video/x-h264" | "video/x-h265" | "video/x-vp8" | "video/x-vp9" | "video/x-av1" + | "image/jpeg" => { + let width = s.get::("width").context("video caps without width")? as u32; + let height = s + .get::("height") + .context("video caps without height")? as u32; + let par = s + .get::("pixel-aspect-ratio") + .unwrap_or_else(|_| gst::Fraction::new(1, 1)); + + let width = std::cmp::min( + width + .mul_div_round(par.numer() as u32, par.denom() as u32) + .unwrap_or(u16::MAX as u32), + u16::MAX as u32, + ); + let height = std::cmp::min(height, u16::MAX as u32); + + v.extend((width << 16).to_be_bytes()); + v.extend((height << 16).to_be_bytes()); + } + _ => v.extend([0u8; 2 * 4]), + } + + Ok(()) +} + +fn write_mdia( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, + creation_time: u64, +) -> Result<(), Error> { + write_full_box(v, b"mdhd", FULL_BOX_VERSION_1, FULL_BOX_FLAGS_NONE, |v| { + write_mdhd(v, cfg, stream, creation_time) + })?; + write_full_box(v, b"hdlr", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_hdlr(v, cfg, stream) + })?; + + // TODO: write elng if needed + + write_box(v, b"minf", |v| write_minf(v, cfg, stream))?; + + Ok(()) +} + +fn write_tref( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + references: &[TrackReference], +) -> Result<(), Error> { + for reference in references { + write_box(v, reference.reference_type, |v| { + for track_id in &reference.track_ids { + 
v.extend(track_id.to_be_bytes()); + } + + Ok(()) + })?; + } + + Ok(()) +} + +fn language_code(lang: impl std::borrow::Borrow<[u8; 3]>) -> u16 { + let lang = lang.borrow(); + + // TODO: Need to relax this once we get the language code from tags + assert!(lang.iter().all(u8::is_ascii_lowercase)); + + (((lang[0] as u16 - 0x60) & 0x1F) << 10) + + (((lang[1] as u16 - 0x60) & 0x1F) << 5) + + ((lang[2] as u16 - 0x60) & 0x1F) +} + +fn write_mdhd( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, + creation_time: u64, +) -> Result<(), Error> { + // Creation time + v.extend(creation_time.to_be_bytes()); + // Modification time + v.extend(creation_time.to_be_bytes()); + // Timescale + v.extend(header_stream_to_timescale(stream).to_be_bytes()); + // Duration + v.extend(0u64.to_be_bytes()); + + // Language as ISO-639-2/T + // TODO: get actual language from the tags + v.extend(language_code(b"und").to_be_bytes()); + + // Pre-defined + v.extend([0u8; 2]); + + Ok(()) +} + +fn write_hdlr( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + // Pre-defined + v.extend([0u8; 4]); + + let s = stream.caps.structure(0).unwrap(); + let (handler_type, name) = match s.name().as_str() { + "video/x-h264" | "video/x-h265" | "video/x-vp8" | "video/x-vp9" | "video/x-av1" + | "image/jpeg" => (b"vide", b"VideoHandler\0".as_slice()), + "audio/mpeg" | "audio/x-opus" | "audio/x-alaw" | "audio/x-mulaw" | "audio/x-adpcm" => { + (b"soun", b"SoundHandler\0".as_slice()) + } + "application/x-onvif-metadata" => (b"meta", b"MetadataHandler\0".as_slice()), + _ => unreachable!(), + }; + + // Handler type + v.extend(handler_type); + + // Reserved + v.extend([0u8; 3 * 4]); + + // Name + v.extend(name); + + Ok(()) +} + +fn write_minf( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + let s = stream.caps.structure(0).unwrap(); + + match s.name().as_str() { + 
"video/x-h264" | "video/x-h265" | "video/x-vp8" | "video/x-vp9" | "video/x-av1" + | "image/jpeg" => { + // Flags are always 1 for unspecified reasons + write_full_box(v, b"vmhd", FULL_BOX_VERSION_0, 1, |v| write_vmhd(v, cfg))? + } + "audio/mpeg" | "audio/x-opus" | "audio/x-alaw" | "audio/x-mulaw" | "audio/x-adpcm" => { + write_full_box(v, b"smhd", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_smhd(v, cfg) + })? + } + "application/x-onvif-metadata" => { + write_full_box(v, b"nmhd", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |_v| { + Ok(()) + })? + } + _ => unreachable!(), + } + + write_box(v, b"dinf", |v| write_dinf(v, cfg))?; + + write_box(v, b"stbl", |v| write_stbl(v, cfg, stream))?; + + Ok(()) +} + +fn write_vmhd(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Graphics mode + v.extend([0u8; 2]); + + // opcolor + v.extend([0u8; 2 * 3]); + + Ok(()) +} + +fn write_smhd(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Balance + v.extend([0u8; 2]); + + // Reserved + v.extend([0u8; 2]); + + Ok(()) +} + +fn write_dinf(v: &mut Vec, cfg: &super::HeaderConfiguration) -> Result<(), Error> { + write_full_box(v, b"dref", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_dref(v, cfg) + })?; + + Ok(()) +} + +const DREF_FLAGS_MEDIA_IN_SAME_FILE: u32 = 0x1; + +fn write_dref(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Entry count + v.extend(1u32.to_be_bytes()); + + write_full_box( + v, + b"url ", + FULL_BOX_VERSION_0, + DREF_FLAGS_MEDIA_IN_SAME_FILE, + |_v| Ok(()), + )?; + + Ok(()) +} + +fn write_stbl( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + write_full_box(v, b"stsd", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_stsd(v, cfg, stream) + })?; + write_full_box(v, b"stts", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_stts(v, cfg) + })?; + write_full_box(v, b"stsc", FULL_BOX_VERSION_0, 
FULL_BOX_FLAGS_NONE, |v| { + write_stsc(v, cfg) + })?; + write_full_box(v, b"stsz", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_stsz(v, cfg) + })?; + + write_full_box(v, b"stco", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_stco(v, cfg) + })?; + + // For video write a sync sample box as indication that not all samples are sync samples + if !stream.delta_frames.intra_only() { + write_full_box(v, b"stss", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_stss(v, cfg) + })? + } + + Ok(()) +} + +fn write_stsd( + v: &mut Vec, + cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + // Entry count + v.extend(1u32.to_be_bytes()); + + let s = stream.caps.structure(0).unwrap(); + match s.name().as_str() { + "video/x-h264" | "video/x-h265" | "video/x-vp8" | "video/x-vp9" | "video/x-av1" + | "image/jpeg" => write_visual_sample_entry(v, cfg, stream)?, + "audio/mpeg" | "audio/x-opus" | "audio/x-alaw" | "audio/x-mulaw" | "audio/x-adpcm" => { + write_audio_sample_entry(v, cfg, stream)? 
+ } + "application/x-onvif-metadata" => write_xml_meta_data_sample_entry(v, cfg, stream)?, + _ => unreachable!(), + } + + Ok(()) +} + +fn write_sample_entry_box) -> Result>( + v: &mut Vec, + fourcc: impl std::borrow::Borrow<[u8; 4]>, + content_func: F, +) -> Result { + write_box(v, fourcc, move |v| { + // Reserved + v.extend([0u8; 6]); + + // Data reference index + v.extend(1u16.to_be_bytes()); + + content_func(v) + }) +} + +fn write_visual_sample_entry( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + let s = stream.caps.structure(0).unwrap(); + let fourcc = match s.name().as_str() { + "video/x-h264" => { + let stream_format = s.get::<&str>("stream-format").context("no stream-format")?; + match stream_format { + "avc" => b"avc1", + "avc3" => b"avc3", + _ => unreachable!(), + } + } + "video/x-h265" => { + let stream_format = s.get::<&str>("stream-format").context("no stream-format")?; + match stream_format { + "hvc1" => b"hvc1", + "hev1" => b"hev1", + _ => unreachable!(), + } + } + "image/jpeg" => b"jpeg", + "video/x-vp8" => b"vp08", + "video/x-vp9" => b"vp09", + "video/x-av1" => b"av01", + _ => unreachable!(), + }; + + write_sample_entry_box(v, fourcc, move |v| { + // pre-defined + v.extend([0u8; 2]); + // Reserved + v.extend([0u8; 2]); + // pre-defined + v.extend([0u8; 3 * 4]); + + // Width + let width = + u16::try_from(s.get::("width").context("no width")?).context("too big width")?; + v.extend(width.to_be_bytes()); + + // Height + let height = u16::try_from(s.get::("height").context("no height")?) 
+ .context("too big height")?; + v.extend(height.to_be_bytes()); + + // Horizontal resolution + v.extend(0x00480000u32.to_be_bytes()); + + // Vertical resolution + v.extend(0x00480000u32.to_be_bytes()); + + // Reserved + v.extend([0u8; 4]); + + // Frame count + v.extend(1u16.to_be_bytes()); + + // Compressor name + v.extend([0u8; 32]); + + // Depth + v.extend(0x0018u16.to_be_bytes()); + + // Pre-defined + v.extend((-1i16).to_be_bytes()); + + // Codec specific boxes + match s.name().as_str() { + "video/x-h264" => { + let codec_data = s + .get::<&gst::BufferRef>("codec_data") + .context("no codec_data")?; + let map = codec_data + .map_readable() + .context("codec_data not mappable")?; + write_box(v, b"avcC", move |v| { + v.extend_from_slice(&map); + Ok(()) + })?; + } + "video/x-h265" => { + let codec_data = s + .get::<&gst::BufferRef>("codec_data") + .context("no codec_data")?; + let map = codec_data + .map_readable() + .context("codec_data not mappable")?; + write_box(v, b"hvcC", move |v| { + v.extend_from_slice(&map); + Ok(()) + })?; + } + "video/x-vp9" => { + let profile: u8 = match s.get::<&str>("profile").expect("no vp9 profile") { + "0" => Some(0), + "1" => Some(1), + "2" => Some(2), + "3" => Some(3), + _ => None, + } + .context("unsupported vp9 profile")?; + let colorimetry = gst_video::VideoColorimetry::from_str( + s.get::<&str>("colorimetry").expect("no colorimetry"), + ) + .context("failed to parse colorimetry")?; + let video_full_range = + colorimetry.range() == gst_video::VideoColorRange::Range0_255; + let chroma_format: u8 = + match s.get::<&str>("chroma-format").expect("no chroma-format") { + "4:2:0" => + // chroma-site is optional + { + match s + .get::<&str>("chroma-site") + .ok() + .and_then(|cs| gst_video::VideoChromaSite::from_str(cs).ok()) + { + Some(gst_video::VideoChromaSite::V_COSITED) => Some(0), + // COSITED + _ => Some(1), + } + } + "4:2:2" => Some(2), + "4:4:4" => Some(3), + _ => None, + } + .context("unsupported chroma-format")?; + let 
bit_depth: u8 = { + let bit_depth_luma = s.get::("bit-depth-luma").expect("no bit-depth-luma"); + let bit_depth_chroma = s + .get::("bit-depth-chroma") + .expect("no bit-depth-chroma"); + if bit_depth_luma != bit_depth_chroma { + return Err(anyhow!("bit-depth-luma and bit-depth-chroma have different values which is an unsupported configuration")); + } + bit_depth_luma as u8 + }; + write_full_box(v, b"vpcC", 1, 0, move |v| { + v.push(profile); + // XXX: hardcoded level 1 + v.push(10); + let mut byte: u8 = 0; + byte |= (bit_depth & 0xF) << 4; + byte |= (chroma_format & 0x7) << 1; + byte |= video_full_range as u8; + v.push(byte); + v.push(colorimetry.primaries().to_iso() as u8); + v.push(colorimetry.transfer().to_iso() as u8); + v.push(colorimetry.matrix().to_iso() as u8); + // 16-bit length field for codec initialization, unused + v.push(0); + v.push(0); + Ok(()) + })?; + } + "video/x-av1" => { + write_box(v, b"av1C", move |v| { + if let Ok(codec_data) = s.get::<&gst::BufferRef>("codec_data") { + let map = codec_data + .map_readable() + .context("codec_data not mappable")?; + + v.extend_from_slice(&map); + } else { + let presentation_delay_minus_one = + if let Ok(presentation_delay) = s.get::("presentation-delay") { + Some( + (1u8 << 5) + | std::cmp::max( + 0xF, + (presentation_delay.saturating_sub(1) & 0xF) as u8, + ), + ) + } else { + None + }; + + let profile = match s.get::<&str>("profile").unwrap() { + "main" => 0, + "high" => 1, + "professional" => 2, + _ => unreachable!(), + }; + + let level = 1; // FIXME + let tier = 0; // FIXME + let (high_bitdepth, twelve_bit) = + match s.get::("bit-depth-luma").unwrap() { + 8 => (false, false), + 10 => (true, false), + 12 => (true, true), + _ => unreachable!(), + }; + let (monochrome, chroma_sub_x, chroma_sub_y) = + match s.get::<&str>("chroma-format").unwrap() { + "4:0:0" => (true, true, true), + "4:2:0" => (false, true, true), + "4:2:2" => (false, true, false), + "4:4:4" => (false, false, false), + _ => unreachable!(), + 
}; + + let chrome_sample_position = match s.get::<&str>("chroma-site") { + Ok("v-cosited") => 1, + Ok("v-cosited+h-cosited") => 2, + _ => 0, + }; + + let codec_data = [ + 0x80 | 0x01, // marker | version + (profile << 5) | level, // profile | level + (tier << 7) + | ((high_bitdepth as u8) << 6) + | ((twelve_bit as u8) << 5) + | ((monochrome as u8) << 4) + | ((chroma_sub_x as u8) << 3) + | ((chroma_sub_y as u8) << 2) + | chrome_sample_position, // tier | high bitdepth | twelve bit | monochrome | chroma sub x | + // chroma sub y | chroma sample position + if let Some(presentation_delay_minus_one) = presentation_delay_minus_one + { + 0x10 | presentation_delay_minus_one // reserved | presentation delay present | presentation delay + } else { + 0 + }, + ]; + + v.extend_from_slice(&codec_data); + } + + Ok(()) + })?; + } + "video/x-vp8" | "image/jpeg" => { + // Nothing to do here + } + _ => unreachable!(), + } + + if let Ok(par) = s.get::("pixel-aspect-ratio") { + write_box(v, b"pasp", move |v| { + v.extend((par.numer() as u32).to_be_bytes()); + v.extend((par.denom() as u32).to_be_bytes()); + Ok(()) + })?; + } + + if let Some(colorimetry) = s + .get::<&str>("colorimetry") + .ok() + .and_then(|c| c.parse::().ok()) + { + write_box(v, b"colr", move |v| { + v.extend(b"nclx"); + let (primaries, transfer, matrix) = { + ( + (colorimetry.primaries().to_iso() as u16), + (colorimetry.transfer().to_iso() as u16), + (colorimetry.matrix().to_iso() as u16), + ) + }; + + let full_range = match colorimetry.range() { + gst_video::VideoColorRange::Range0_255 => 0x80u8, + gst_video::VideoColorRange::Range16_235 => 0x00u8, + _ => 0x00, + }; + + v.extend(primaries.to_be_bytes()); + v.extend(transfer.to_be_bytes()); + v.extend(matrix.to_be_bytes()); + v.push(full_range); + + Ok(()) + })?; + } + + if let Ok(cll) = gst_video::VideoContentLightLevel::from_caps(&stream.caps) { + write_box(v, b"clli", move |v| { + v.extend((cll.max_content_light_level()).to_be_bytes()); + 
v.extend((cll.max_frame_average_light_level()).to_be_bytes()); + Ok(()) + })?; + } + + if let Ok(mastering) = gst_video::VideoMasteringDisplayInfo::from_caps(&stream.caps) { + write_box(v, b"mdcv", move |v| { + for primary in mastering.display_primaries() { + v.extend(primary.x.to_be_bytes()); + v.extend(primary.y.to_be_bytes()); + } + v.extend(mastering.white_point().x.to_be_bytes()); + v.extend(mastering.white_point().y.to_be_bytes()); + v.extend(mastering.max_display_mastering_luminance().to_be_bytes()); + v.extend(mastering.max_display_mastering_luminance().to_be_bytes()); + Ok(()) + })?; + } + + // Write fiel box for codecs that require it + if ["image/jpeg"].contains(&s.name().as_str()) { + let interlace_mode = s + .get::<&str>("interlace-mode") + .ok() + .map(gst_video::VideoInterlaceMode::from_string) + .unwrap_or(gst_video::VideoInterlaceMode::Progressive); + let field_order = s + .get::<&str>("field-order") + .ok() + .map(gst_video::VideoFieldOrder::from_string) + .unwrap_or(gst_video::VideoFieldOrder::Unknown); + + write_box(v, b"fiel", move |v| { + let (interlace, field_order) = match interlace_mode { + gst_video::VideoInterlaceMode::Progressive => (1, 0), + gst_video::VideoInterlaceMode::Interleaved + if field_order == gst_video::VideoFieldOrder::TopFieldFirst => + { + (2, 9) + } + gst_video::VideoInterlaceMode::Interleaved => (2, 14), + _ => (0, 0), + }; + + v.push(interlace); + v.push(field_order); + Ok(()) + })?; + } + + // TODO: write btrt bitrate box based on tags + + Ok(()) + })?; + + Ok(()) +} + +fn write_audio_sample_entry( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + let s = stream.caps.structure(0).unwrap(); + let fourcc = match s.name().as_str() { + "audio/mpeg" => b"mp4a", + "audio/x-opus" => b"Opus", + "audio/x-alaw" => b"alaw", + "audio/x-mulaw" => b"ulaw", + "audio/x-adpcm" => { + let layout = s.get::<&str>("layout").context("no ADPCM layout field")?; + + match layout { + 
"g726" => b"ms\x00\x45", + _ => unreachable!(), + } + } + _ => unreachable!(), + }; + + let sample_size = match s.name().as_str() { + "audio/x-adpcm" => { + let bitrate = s.get::("bitrate").context("no ADPCM bitrate field")?; + (bitrate / 8000) as u16 + } + _ => 16u16, + }; + + write_sample_entry_box(v, fourcc, move |v| { + // Reserved + v.extend([0u8; 2 * 4]); + + // Channel count + let channels = u16::try_from(s.get::("channels").context("no channels")?) + .context("too many channels")?; + v.extend(channels.to_be_bytes()); + + // Sample size + v.extend(sample_size.to_be_bytes()); + + // Pre-defined + v.extend([0u8; 2]); + + // Reserved + v.extend([0u8; 2]); + + // Sample rate + let rate = u16::try_from(s.get::("rate").context("no rate")?).unwrap_or(0); + v.extend((u32::from(rate) << 16).to_be_bytes()); + + // Codec specific boxes + match s.name().as_str() { + "audio/mpeg" => { + let codec_data = s + .get::<&gst::BufferRef>("codec_data") + .context("no codec_data")?; + let map = codec_data + .map_readable() + .context("codec_data not mappable")?; + if map.len() < 2 { + bail!("too small codec_data"); + } + write_esds_aac(v, &map)?; + } + "audio/x-opus" => { + write_dops(v, &stream.caps)?; + } + "audio/x-alaw" | "audio/x-mulaw" | "audio/x-adpcm" => { + // Nothing to do here + } + _ => unreachable!(), + } + + // If rate did not fit into 16 bits write a full `srat` box + if rate == 0 { + let rate = s.get::("rate").context("no rate")?; + // FIXME: This is defined as full box? + write_full_box( + v, + b"srat", + FULL_BOX_VERSION_0, + FULL_BOX_FLAGS_NONE, + move |v| { + v.extend((rate as u32).to_be_bytes()); + Ok(()) + }, + )?; + } + + // TODO: write btrt bitrate box based on tags + + // TODO: chnl box for channel ordering? 
probably not needed for AAC + + Ok(()) + })?; + + Ok(()) +} + +fn write_esds_aac(v: &mut Vec, codec_data: &[u8]) -> Result<(), Error> { + let calculate_len = |mut len| { + if len > 260144641 { + bail!("too big descriptor length"); + } + + if len == 0 { + return Ok(([0; 4], 1)); + } + + let mut idx = 0; + let mut lens = [0u8; 4]; + while len > 0 { + lens[idx] = ((if len > 0x7f { 0x80 } else { 0x00 }) | (len & 0x7f)) as u8; + idx += 1; + len >>= 7; + } + + Ok((lens, idx)) + }; + + write_full_box( + v, + b"esds", + FULL_BOX_VERSION_0, + FULL_BOX_FLAGS_NONE, + move |v| { + // Calculate all lengths bottom up + + // Decoder specific info + let decoder_specific_info_len = calculate_len(codec_data.len())?; + + // Decoder config + let decoder_config_len = + calculate_len(13 + 1 + decoder_specific_info_len.1 + codec_data.len())?; + + // SL config + let sl_config_len = calculate_len(1)?; + + // ES descriptor + let es_descriptor_len = calculate_len( + 3 + 1 + + decoder_config_len.1 + + 13 + + 1 + + decoder_specific_info_len.1 + + codec_data.len() + + 1 + + sl_config_len.1 + + 1, + )?; + + // ES descriptor tag + v.push(0x03); + + // Length + v.extend_from_slice(&es_descriptor_len.0[..(es_descriptor_len.1)]); + + // Track ID + v.extend(1u16.to_be_bytes()); + // Flags + v.push(0u8); + + // Decoder config descriptor + v.push(0x04); + + // Length + v.extend_from_slice(&decoder_config_len.0[..(decoder_config_len.1)]); + + // Object type ESDS_OBJECT_TYPE_MPEG4_P3 + v.push(0x40); + // Stream type ESDS_STREAM_TYPE_AUDIO + v.push((0x05 << 2) | 0x01); + + // Buffer size db? 
+ v.extend([0u8; 3]); + + // Max bitrate + v.extend(0u32.to_be_bytes()); + + // Avg bitrate + v.extend(0u32.to_be_bytes()); + + // Decoder specific info + v.push(0x05); + + // Length + v.extend_from_slice(&decoder_specific_info_len.0[..(decoder_specific_info_len.1)]); + v.extend_from_slice(codec_data); + + // SL config descriptor + v.push(0x06); + + // Length: 1 (tag) + 1 (length) + 1 (predefined) + v.extend_from_slice(&sl_config_len.0[..(sl_config_len.1)]); + + // Predefined + v.push(0x02); + Ok(()) + }, + ) +} + +fn write_dops(v: &mut Vec, caps: &gst::Caps) -> Result<(), Error> { + let rate; + let channels; + let channel_mapping_family; + let stream_count; + let coupled_count; + let pre_skip; + let output_gain; + let mut channel_mapping = [0; 256]; + + // TODO: Use audio clipping meta to calculate pre_skip + + if let Some(header) = caps + .structure(0) + .unwrap() + .get::("streamheader") + .ok() + .and_then(|a| a.get(0).and_then(|v| v.get::().ok())) + { + ( + rate, + channels, + channel_mapping_family, + stream_count, + coupled_count, + pre_skip, + output_gain, + ) = gst_pbutils::codec_utils_opus_parse_header(&header, Some(&mut channel_mapping)) + .unwrap(); + } else { + ( + rate, + channels, + channel_mapping_family, + stream_count, + coupled_count, + ) = gst_pbutils::codec_utils_opus_parse_caps(caps, Some(&mut channel_mapping)).unwrap(); + output_gain = 0; + pre_skip = 0; + } + + write_box(v, b"dOps", move |v| { + // Version number + v.push(0); + v.push(channels); + v.extend(pre_skip.to_be_bytes()); + v.extend(rate.to_be_bytes()); + v.extend(output_gain.to_be_bytes()); + v.push(channel_mapping_family); + if channel_mapping_family > 0 { + v.push(stream_count); + v.push(coupled_count); + v.extend(&channel_mapping[..channels as usize]); + } + + Ok(()) + }) +} + +fn write_xml_meta_data_sample_entry( + v: &mut Vec, + _cfg: &super::HeaderConfiguration, + stream: &super::HeaderStream, +) -> Result<(), Error> { + let s = stream.caps.structure(0).unwrap(); + let 
namespace = match s.name().as_str() { + "application/x-onvif-metadata" => b"http://www.onvif.org/ver10/schema", + _ => unreachable!(), + }; + + write_sample_entry_box(v, b"metx", move |v| { + // content_encoding, empty string + v.push(0); + + // namespace + v.extend_from_slice(namespace); + v.push(0); + + // schema_location, empty string list + v.push(0); + + Ok(()) + })?; + + Ok(()) +} + +fn write_stts(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Entry count + v.extend(0u32.to_be_bytes()); + + Ok(()) +} + +fn write_stsc(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Entry count + v.extend(0u32.to_be_bytes()); + + Ok(()) +} + +fn write_stsz(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Sample size + v.extend(0u32.to_be_bytes()); + + // Sample count + v.extend(0u32.to_be_bytes()); + + Ok(()) +} + +fn write_stco(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Entry count + v.extend(0u32.to_be_bytes()); + + Ok(()) +} + +fn write_stss(v: &mut Vec, _cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Entry count + v.extend(0u32.to_be_bytes()); + + Ok(()) +} + +fn write_mvex(v: &mut Vec, cfg: &super::HeaderConfiguration) -> Result<(), Error> { + if cfg.write_mehd { + if cfg.update && cfg.duration.is_some() { + write_full_box(v, b"mehd", FULL_BOX_VERSION_1, FULL_BOX_FLAGS_NONE, |v| { + write_mehd(v, cfg) + })?; + } else { + write_box(v, b"free", |v| { + // version/flags of full box + v.extend(0u32.to_be_bytes()); + // mehd duration + v.extend(0u64.to_be_bytes()); + + Ok(()) + })?; + } + } + + for (idx, _stream) in cfg.streams.iter().enumerate() { + write_full_box(v, b"trex", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_trex(v, cfg, idx) + })?; + } + + Ok(()) +} + +fn write_mehd(v: &mut Vec, cfg: &super::HeaderConfiguration) -> Result<(), Error> { + // Use the reference track timescale + let timescale = 
header_configuration_to_timescale(cfg); + + let duration = cfg + .duration + .expect("no duration") + .mul_div_ceil(timescale as u64, gst::ClockTime::SECOND.nseconds()) + .context("too long duration")?; + + // Media duration in mvhd.timescale units + v.extend(duration.to_be_bytes()); + + Ok(()) +} + +fn write_trex(v: &mut Vec, _cfg: &super::HeaderConfiguration, idx: usize) -> Result<(), Error> { + // Track ID + v.extend((idx as u32 + 1).to_be_bytes()); + + // Default sample description index + v.extend(1u32.to_be_bytes()); + + // Default sample duration + v.extend(0u32.to_be_bytes()); + + // Default sample size + v.extend(0u32.to_be_bytes()); + + // Default sample flags + v.extend(0u32.to_be_bytes()); + + // Default sample duration/size/etc will be provided in the traf/trun if one can be determined + // for a whole fragment + + Ok(()) +} + +/// Creates `styp` and `moof` boxes and `mdat` header +pub(super) fn create_fmp4_fragment_header( + cfg: super::FragmentHeaderConfiguration, +) -> Result<(gst::Buffer, u64), Error> { + let mut v = vec![]; + + // Don't write a `styp` if this is only a chunk. 
+ if !cfg.chunk { + let (brand, compatible_brands) = + brands_from_variant_and_caps(cfg.variant, cfg.streams.iter().map(|s| &s.caps)); + + write_box(&mut v, b"styp", |v| { + // major brand + v.extend(brand); + // minor version + v.extend(0u32.to_be_bytes()); + // compatible brands + v.extend(compatible_brands.into_iter().flatten()); + + Ok(()) + })?; + } + + let styp_len = v.len(); + + let data_offset_offsets = write_box(&mut v, b"moof", |v| write_moof(v, &cfg))?; + + let size = cfg + .buffers + .iter() + .map(|buffer| buffer.buffer.size() as u64) + .sum::(); + if let Ok(size) = u32::try_from(size + 8) { + v.extend(size.to_be_bytes()); + v.extend(b"mdat"); + } else { + v.extend(1u32.to_be_bytes()); + v.extend(b"mdat"); + v.extend((size + 16).to_be_bytes()); + } + + let data_offset = v.len() - styp_len; + for data_offset_offset in data_offset_offsets { + let val = u32::from_be_bytes(v[data_offset_offset..][..4].try_into()?) + .checked_add(u32::try_from(data_offset)?) + .ok_or_else(|| anyhow!("can't calculate track run data offset"))?; + v[data_offset_offset..][..4].copy_from_slice(&val.to_be_bytes()); + } + + Ok((gst::Buffer::from_mut_slice(v), styp_len as u64)) +} + +fn write_moof( + v: &mut Vec, + cfg: &super::FragmentHeaderConfiguration, +) -> Result, Error> { + write_full_box(v, b"mfhd", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + write_mfhd(v, cfg) + })?; + + let mut data_offset_offsets = vec![]; + for (idx, stream) in cfg.streams.iter().enumerate() { + // Skip tracks without any buffers for this fragment. 
+ if stream.start_time.is_none() { + continue; + } + + write_box(v, b"traf", |v| { + write_traf(v, cfg, &mut data_offset_offsets, idx, stream) + })?; + } + + Ok(data_offset_offsets) +} + +fn write_mfhd(v: &mut Vec, cfg: &super::FragmentHeaderConfiguration) -> Result<(), Error> { + v.extend(cfg.sequence_number.to_be_bytes()); + + Ok(()) +} + +#[allow(clippy::identity_op)] +#[allow(clippy::bool_to_int_with_if)] +fn sample_flags_from_buffer(stream: &super::FragmentHeaderStream, buffer: &gst::BufferRef) -> u32 { + if stream.delta_frames.intra_only() { + (0b00u32 << (16 + 10)) | // leading: unknown + (0b10u32 << (16 + 8)) | // depends: no + (0b10u32 << (16 + 6)) | // depended: no + (0b00u32 << (16 + 4)) | // redundancy: unknown + (0b000u32 << (16 + 1)) | // padding: no + (0b0u32 << 16) | // non-sync-sample: no + (0u32) // degradation priority + } else { + let depends = if buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) { + 0b01u32 + } else { + 0b10u32 + }; + let depended = if buffer.flags().contains(gst::BufferFlags::DROPPABLE) { + 0b10u32 + } else { + 0b00u32 + }; + let non_sync_sample = if buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) { + 0b1u32 + } else { + 0b0u32 + }; + + (0b00u32 << (16 + 10)) | // leading: unknown + (depends << (16 + 8)) | // depends + (depended << (16 + 6)) | // depended + (0b00u32 << (16 + 4)) | // redundancy: unknown + (0b000u32 << (16 + 1)) | // padding: no + (non_sync_sample << 16) | // non-sync-sample + (0u32) // degradation priority + } +} + +const DEFAULT_SAMPLE_DURATION_PRESENT: u32 = 0x08; +const DEFAULT_SAMPLE_SIZE_PRESENT: u32 = 0x10; +const DEFAULT_SAMPLE_FLAGS_PRESENT: u32 = 0x20; +const DEFAULT_BASE_IS_MOOF: u32 = 0x2_00_00; + +const DATA_OFFSET_PRESENT: u32 = 0x0_01; +const FIRST_SAMPLE_FLAGS_PRESENT: u32 = 0x0_04; +const SAMPLE_DURATION_PRESENT: u32 = 0x1_00; +const SAMPLE_SIZE_PRESENT: u32 = 0x2_00; +const SAMPLE_FLAGS_PRESENT: u32 = 0x4_00; +const SAMPLE_COMPOSITION_TIME_OFFSET_PRESENT: u32 = 0x8_00; + 
+#[allow(clippy::type_complexity)] +fn analyze_buffers( + cfg: &super::FragmentHeaderConfiguration, + idx: usize, + stream: &super::FragmentHeaderStream, + timescale: u32, +) -> Result< + ( + // tf_flags + u32, + // tr_flags + u32, + // default size + Option, + // default duration + Option, + // default flags + Option, + // negative composition time offsets + bool, + ), + Error, +> { + let mut tf_flags = DEFAULT_BASE_IS_MOOF; + let mut tr_flags = DATA_OFFSET_PRESENT; + + let mut duration = None; + let mut size = None; + let mut first_buffer_flags = None; + let mut flags = None; + + let mut negative_composition_time_offsets = false; + + for Buffer { + idx: _idx, + buffer, + timestamp: _timestamp, + duration: sample_duration, + composition_time_offset, + } in cfg.buffers.iter().filter(|b| b.idx == idx) + { + if size.is_none() { + size = Some(buffer.size() as u32); + } + if Some(buffer.size() as u32) != size { + tr_flags |= SAMPLE_SIZE_PRESENT; + } + + let sample_duration = u32::try_from( + sample_duration + .nseconds() + .mul_div_round(timescale as u64, gst::ClockTime::SECOND.nseconds()) + .context("too big sample duration")?, + ) + .context("too big sample duration")?; + + if duration.is_none() { + duration = Some(sample_duration); + } + if Some(sample_duration) != duration { + tr_flags |= SAMPLE_DURATION_PRESENT; + } + + let f = sample_flags_from_buffer(stream, buffer); + if first_buffer_flags.is_none() { + // First buffer, remember as first buffer flags + first_buffer_flags = Some(f); + } else if flags.is_none() { + // Second buffer, remember as general flags and if they're + // different from the first buffer's flags then also remember + // that + flags = Some(f); + if Some(f) != first_buffer_flags { + tr_flags |= FIRST_SAMPLE_FLAGS_PRESENT; + } + } else if Some(f) != flags { + // Third or later buffer, and the flags are different than the second buffer's flags. + // In that case each sample will have to store its own flags. 
+ tr_flags &= !FIRST_SAMPLE_FLAGS_PRESENT; + tr_flags |= SAMPLE_FLAGS_PRESENT; + } + + if let Some(composition_time_offset) = *composition_time_offset { + assert!(stream.delta_frames.requires_dts()); + if composition_time_offset != 0 { + tr_flags |= SAMPLE_COMPOSITION_TIME_OFFSET_PRESENT; + } + if composition_time_offset < 0 { + negative_composition_time_offsets = true; + } + } + } + + if (tr_flags & SAMPLE_SIZE_PRESENT) == 0 { + tf_flags |= DEFAULT_SAMPLE_SIZE_PRESENT; + } else { + size = None; + } + + if (tr_flags & SAMPLE_DURATION_PRESENT) == 0 { + tf_flags |= DEFAULT_SAMPLE_DURATION_PRESENT; + } else { + duration = None; + } + + // If there is only a single buffer use its flags as default sample flags + // instead of first sample flags. + if flags.is_none() && first_buffer_flags.is_some() { + tr_flags &= !FIRST_SAMPLE_FLAGS_PRESENT; + flags = first_buffer_flags.take(); + } + + // If all but possibly the first buffer had the same flags then only store them once instead of + // with every single sample. 
+ if (tr_flags & SAMPLE_FLAGS_PRESENT) == 0 { + tf_flags |= DEFAULT_SAMPLE_FLAGS_PRESENT; + } else { + flags = None; + } + + Ok(( + tf_flags, + tr_flags, + size, + duration, + flags, + negative_composition_time_offsets, + )) +} + +#[allow(clippy::ptr_arg)] +fn write_traf( + v: &mut Vec, + cfg: &super::FragmentHeaderConfiguration, + data_offset_offsets: &mut Vec, + idx: usize, + stream: &super::FragmentHeaderStream, +) -> Result<(), Error> { + let timescale = fragment_header_stream_to_timescale(stream); + + // Analyze all buffers to know what values can be put into the tfhd for all samples and what + // has to be stored for every single sample + let ( + tf_flags, + mut tr_flags, + default_size, + default_duration, + default_flags, + negative_composition_time_offsets, + ) = analyze_buffers(cfg, idx, stream, timescale)?; + + assert!((tf_flags & DEFAULT_SAMPLE_SIZE_PRESENT == 0) ^ default_size.is_some()); + assert!((tf_flags & DEFAULT_SAMPLE_DURATION_PRESENT == 0) ^ default_duration.is_some()); + assert!((tf_flags & DEFAULT_SAMPLE_FLAGS_PRESENT == 0) ^ default_flags.is_some()); + + write_full_box(v, b"tfhd", FULL_BOX_VERSION_0, tf_flags, |v| { + write_tfhd(v, cfg, idx, default_size, default_duration, default_flags) + })?; + write_full_box(v, b"tfdt", FULL_BOX_VERSION_1, FULL_BOX_FLAGS_NONE, |v| { + write_tfdt(v, cfg, idx, stream, timescale) + })?; + + let mut current_data_offset = 0; + + for run in GroupBy::new(cfg.buffers, |a: &Buffer, b: &Buffer| a.idx == b.idx) { + if run[0].idx != idx { + // FIXME: What to do with >4GB offsets? 
+ current_data_offset = (current_data_offset as u64 + + run.iter().map(|b| b.buffer.size() as u64).sum::()) + .try_into()?; + continue; + } + + let data_offset_offset = write_full_box( + v, + b"trun", + if negative_composition_time_offsets { + FULL_BOX_VERSION_1 + } else { + FULL_BOX_VERSION_0 + }, + tr_flags, + |v| { + write_trun( + v, + cfg, + current_data_offset, + tr_flags, + timescale, + stream, + run, + ) + }, + )?; + data_offset_offsets.push(data_offset_offset); + + // FIXME: What to do with >4GB offsets? + current_data_offset = (current_data_offset as u64 + + run.iter().map(|b| b.buffer.size() as u64).sum::()) + .try_into()?; + + // Don't include first sample flags in any trun boxes except for the first + tr_flags &= !FIRST_SAMPLE_FLAGS_PRESENT; + } + + // TODO: saio, saiz, sbgp, sgpd, subs? + + Ok(()) +} + +fn write_tfhd( + v: &mut Vec, + _cfg: &super::FragmentHeaderConfiguration, + idx: usize, + default_size: Option, + default_duration: Option, + default_flags: Option, +) -> Result<(), Error> { + // Track ID + v.extend((idx as u32 + 1).to_be_bytes()); + + // No base data offset, no sample description index + + if let Some(default_duration) = default_duration { + v.extend(default_duration.to_be_bytes()); + } + + if let Some(default_size) = default_size { + v.extend(default_size.to_be_bytes()); + } + + if let Some(default_flags) = default_flags { + v.extend(default_flags.to_be_bytes()); + } + + Ok(()) +} + +fn write_tfdt( + v: &mut Vec, + _cfg: &super::FragmentHeaderConfiguration, + _idx: usize, + stream: &super::FragmentHeaderStream, + timescale: u32, +) -> Result<(), Error> { + let base_time = stream + .start_time + .unwrap() + .mul_div_floor(timescale as u64, gst::ClockTime::SECOND.nseconds()) + .context("base time overflow")?; + + v.extend(base_time.to_be_bytes()); + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn write_trun( + v: &mut Vec, + _cfg: &super::FragmentHeaderConfiguration, + current_data_offset: u32, + tr_flags: u32, + timescale: 
u32, + stream: &super::FragmentHeaderStream, + buffers: &[Buffer], +) -> Result { + // Sample count + v.extend((buffers.len() as u32).to_be_bytes()); + + let data_offset_offset = v.len(); + // Data offset, will be rewritten later + v.extend(current_data_offset.to_be_bytes()); + + if (tr_flags & FIRST_SAMPLE_FLAGS_PRESENT) != 0 { + v.extend(sample_flags_from_buffer(stream, &buffers[0].buffer).to_be_bytes()); + } + + for Buffer { + idx: _idx, + ref buffer, + timestamp: _timestamp, + duration, + composition_time_offset, + } in buffers.iter() + { + if (tr_flags & SAMPLE_DURATION_PRESENT) != 0 { + // Sample duration + let sample_duration = u32::try_from( + duration + .nseconds() + .mul_div_round(timescale as u64, gst::ClockTime::SECOND.nseconds()) + .context("too big sample duration")?, + ) + .context("too big sample duration")?; + v.extend(sample_duration.to_be_bytes()); + } + + if (tr_flags & SAMPLE_SIZE_PRESENT) != 0 { + // Sample size + v.extend((buffer.size() as u32).to_be_bytes()); + } + + if (tr_flags & SAMPLE_FLAGS_PRESENT) != 0 { + assert!((tr_flags & FIRST_SAMPLE_FLAGS_PRESENT) == 0); + + // Sample flags + v.extend(sample_flags_from_buffer(stream, buffer).to_be_bytes()); + } + + if (tr_flags & SAMPLE_COMPOSITION_TIME_OFFSET_PRESENT) != 0 { + // Sample composition time offset + let composition_time_offset = i32::try_from( + composition_time_offset + .unwrap_or(0) + .mul_div_round(timescale as i64, gst::ClockTime::SECOND.nseconds() as i64) + .context("too big composition time offset")?, + ) + .context("too big composition time offset")?; + v.extend(composition_time_offset.to_be_bytes()); + } + } + + Ok(data_offset_offset) +} + +/// Creates `mfra` box +pub(crate) fn create_mfra( + caps: &gst::CapsRef, + fragment_offsets: &[super::FragmentOffset], +) -> Result { + let timescale = caps_to_timescale(caps); + + let mut v = vec![]; + + let offset = write_box(&mut v, b"mfra", |v| { + write_full_box(v, b"tfra", FULL_BOX_VERSION_1, FULL_BOX_FLAGS_NONE, |v| { + // Track 
ID + v.extend(1u32.to_be_bytes()); + + // Reserved / length of traf/trun/sample + v.extend(0u32.to_be_bytes()); + + // Number of entries + v.extend( + u32::try_from(fragment_offsets.len()) + .context("too many fragments")? + .to_be_bytes(), + ); + + for super::FragmentOffset { time, offset } in fragment_offsets { + // Time + let time = time + .nseconds() + .mul_div_round(timescale as u64, gst::ClockTime::SECOND.nseconds()) + .context("time overflow")?; + v.extend(time.to_be_bytes()); + + // moof offset + v.extend(offset.to_be_bytes()); + + // traf/trun/sample number + v.extend_from_slice(&[1u8; 3][..]); + } + + Ok(()) + })?; + + let offset = write_full_box(v, b"mfro", FULL_BOX_VERSION_0, FULL_BOX_FLAGS_NONE, |v| { + let offset = v.len(); + // Parent size + v.extend(0u32.to_be_bytes()); + Ok(offset) + })?; + + Ok(offset) + })?; + + let len = u32::try_from(v.len() as u64).context("too big mfra")?; + v[offset..][..4].copy_from_slice(&len.to_be_bytes()); + + Ok(gst::Buffer::from_mut_slice(v)) +} + +// Copy from std while this is still nightly-only +use std::{fmt, str::FromStr}; + +/// An iterator over slice in (non-overlapping) chunks separated by a predicate. +/// +/// This struct is created by the [`group_by`] method on [slices]. 
+///
+/// [`group_by`]: slice::group_by
+/// [slices]: slice
+struct GroupBy<'a, T: 'a, P> {
+    slice: &'a [T],
+    predicate: P,
+}
+
+impl<'a, T: 'a, P> GroupBy<'a, T, P> {
+    fn new(slice: &'a [T], predicate: P) -> Self {
+        GroupBy { slice, predicate }
+    }
+}
+
+impl<'a, T: 'a, P> Iterator for GroupBy<'a, T, P>
+where
+    P: FnMut(&T, &T) -> bool,
+{
+    type Item = &'a [T];
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.slice.is_empty() {
+            None
+        } else {
+            let mut len = 1;
+            let mut iter = self.slice.windows(2);
+            while let Some([l, r]) = iter.next() {
+                if (self.predicate)(l, r) {
+                    len += 1
+                } else {
+                    break;
+                }
+            }
+            let (head, tail) = self.slice.split_at(len);
+            self.slice = tail;
+            Some(head)
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        if self.slice.is_empty() {
+            (0, Some(0))
+        } else {
+            (1, Some(self.slice.len()))
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<Self::Item> {
+        self.next_back()
+    }
+}
+
+impl<'a, T: 'a, P> DoubleEndedIterator for GroupBy<'a, T, P>
+where
+    P: FnMut(&T, &T) -> bool,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.slice.is_empty() {
+            None
+        } else {
+            let mut len = 1;
+            let mut iter = self.slice.windows(2);
+            while let Some([l, r]) = iter.next_back() {
+                if (self.predicate)(l, r) {
+                    len += 1
+                } else {
+                    break;
+                }
+            }
+            let (head, tail) = self.slice.split_at(self.slice.len() - len);
+            self.slice = head;
+            Some(tail)
+        }
+    }
+}
+
+impl<'a, T: 'a, P> std::iter::FusedIterator for GroupBy<'a, T, P> where P: FnMut(&T, &T) -> bool {}
+
+impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for GroupBy<'a, T, P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("GroupBy")
+            .field("slice", &self.slice)
+            .finish()
+    }
+}
\ No newline at end of file
diff --git a/gstreamer/src/fmp4mux/imp.rs b/gstreamer/src/fmp4mux/imp.rs
new file mode 100644
index 0000000..0c1c8fc
--- /dev/null
+++ b/gstreamer/src/fmp4mux/imp.rs
@@ -0,0 +1,3869 @@
+// Copyright (C) 2021 Sebastian Dröge <sebastian@centricular.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
+// If a copy of the MPL was not distributed with this file, You can obtain one at
+// <https://mozilla.org/MPL/2.0/>.
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use gst::glib;
+use gst::prelude::*;
+use gst::subclass::prelude::*;
+use gst_base::prelude::*;
+use gst_base::subclass::prelude::*;
+
+use std::collections::VecDeque;
+use std::mem;
+use std::sync::Mutex;
+
+use gst::glib::once_cell::sync::Lazy;
+
+use super::boxes;
+use super::Buffer;
+use super::DeltaFrames;
+
+/// Offset for the segment in non-single-stream variants.
+const SEGMENT_OFFSET: gst::ClockTime = gst::ClockTime::from_seconds(60 * 60 * 1000);
+
+/// Offset between UNIX epoch and Jan 1 1601 epoch in seconds.
+/// 1601 = UNIX + UNIX_1601_OFFSET.
+const UNIX_1601_OFFSET: u64 = 11_644_473_600;
+
+/// Offset between NTP and UNIX epoch in seconds.
+/// NTP = UNIX + NTP_UNIX_OFFSET.
+const NTP_UNIX_OFFSET: u64 = 2_208_988_800;
+
+/// Reference timestamp meta caps for NTP timestamps.
+static NTP_CAPS: Lazy<gst::Caps> = Lazy::new(|| gst::Caps::builder("timestamp/x-ntp").build());
+
+/// Reference timestamp meta caps for UNIX timestamps.
+static UNIX_CAPS: Lazy<gst::Caps> = Lazy::new(|| gst::Caps::builder("timestamp/x-unix").build());
+
+/// Returns the UTC time of the buffer in the UNIX epoch.
+fn get_utc_time_from_buffer(buffer: &gst::BufferRef) -> Option<gst::ClockTime> {
+    buffer
+        .iter_meta::<gst::ReferenceTimestampMeta>()
+        .find_map(|meta| {
+            if meta.reference().can_intersect(&UNIX_CAPS) {
+                Some(meta.timestamp())
+            } else if meta.reference().can_intersect(&NTP_CAPS) {
+                meta.timestamp().checked_sub(NTP_UNIX_OFFSET.seconds())
+            } else {
+                None
+            }
+        })
+}
+
+/// Converts a running time to an UTC time.
+fn running_time_to_utc_time(
+    running_time: impl Into<gst::Signed<gst::ClockTime>>,
+    running_time_utc_time_mapping: (
+        impl Into<gst::Signed<gst::ClockTime>>,
+        impl Into<gst::Signed<gst::ClockTime>>,
+    ),
+) -> Option<gst::ClockTime> {
+    running_time_utc_time_mapping
+        .1
+        .into()
+        .checked_sub(running_time_utc_time_mapping.0.into())
+        .and_then(|res| res.checked_add(running_time.into()))
+        .and_then(|res| res.positive())
+}
+
+/// Converts an UTC time to a running time.
+fn utc_time_to_running_time(
+    utc_time: gst::ClockTime,
+    running_time_utc_time_mapping: (
+        impl Into<gst::Signed<gst::ClockTime>>,
+        impl Into<gst::Signed<gst::ClockTime>>,
+    ),
+) -> Option<gst::ClockTime> {
+    running_time_utc_time_mapping
+        .0
+        .into()
+        .checked_sub(running_time_utc_time_mapping.1.into())
+        .and_then(|res| res.checked_add_unsigned(utc_time))
+        .and_then(|res| res.positive())
+}
+
+static CAT: Lazy<gst::DebugCategory> = Lazy::new(|| {
+    gst::DebugCategory::new(
+        "fmp4mux",
+        gst::DebugColorFlags::empty(),
+        Some("FMP4Mux Element"),
+    )
+});
+
+const DEFAULT_FRAGMENT_DURATION: gst::ClockTime = gst::ClockTime::from_seconds(10);
+const DEFAULT_CHUNK_DURATION: Option<gst::ClockTime> = gst::ClockTime::NONE;
+const DEFAULT_HEADER_UPDATE_MODE: super::HeaderUpdateMode = super::HeaderUpdateMode::None;
+const DEFAULT_WRITE_MFRA: bool = false;
+const DEFAULT_WRITE_MEHD: bool = false;
+const DEFAULT_INTERLEAVE_BYTES: Option<u64> = None;
+const DEFAULT_INTERLEAVE_TIME: Option<gst::ClockTime> = Some(gst::ClockTime::from_mseconds(250));
+
+#[derive(Debug, Clone)]
+struct Settings {
+    fragment_duration: gst::ClockTime,
+    chunk_duration: Option<gst::ClockTime>,
+    header_update_mode: super::HeaderUpdateMode,
+    write_mfra: bool,
+    write_mehd: bool,
+    interleave_bytes: Option<u64>,
+    interleave_time: Option<gst::ClockTime>,
+    movie_timescale: u32,
+    offset_to_zero: bool,
+}
+
+impl Default for Settings {
+    fn default() -> Self {
+        Settings {
+            fragment_duration: DEFAULT_FRAGMENT_DURATION,
+            chunk_duration: DEFAULT_CHUNK_DURATION,
+            header_update_mode: DEFAULT_HEADER_UPDATE_MODE,
+            write_mfra: DEFAULT_WRITE_MFRA,
+            write_mehd: DEFAULT_WRITE_MEHD,
+            interleave_bytes: DEFAULT_INTERLEAVE_BYTES,
+            interleave_time: DEFAULT_INTERLEAVE_TIME,
movie_timescale: 0, + offset_to_zero: false, + } + } +} + +#[derive(Debug, Clone)] +struct PreQueuedBuffer { + /// Buffer + /// + /// Buffer PTS/DTS are updated to the output segment in multi-stream configurations. + buffer: gst::Buffer, + + /// PTS + /// + /// In ONVIF mode this is the UTC time, otherwise it is the PTS running time. + pts: gst::ClockTime, + + /// End PTS + /// + /// In ONVIF mode this is the UTC time, otherwise it is the PTS running time. + end_pts: gst::ClockTime, + + /// DTS + /// + /// In ONVIF mode this is the UTC time, otherwise it is the DTS running time. + dts: Option>, + + /// End DTS + /// + /// In ONVIF mode this is the UTC time, otherwise it is the DTS running time. + end_dts: Option>, +} + +#[derive(Debug)] +struct GopBuffer { + buffer: gst::Buffer, + pts: gst::ClockTime, + pts_position: gst::ClockTime, + dts: Option, +} + +#[derive(Debug)] +struct Gop { + /// Start PTS. + start_pts: gst::ClockTime, + /// Start DTS. + start_dts: Option, + /// Earliest PTS. + earliest_pts: gst::ClockTime, + /// Once this is known to be the final earliest PTS/DTS + final_earliest_pts: bool, + /// PTS plus duration of last buffer, or start of next GOP + end_pts: gst::ClockTime, + /// Once this is known to be the final end PTS/DTS + final_end_pts: bool, + /// DTS plus duration of last buffer, or start of next GOP + end_dts: Option, + + /// Earliest PTS buffer position + earliest_pts_position: gst::ClockTime, + + /// Buffer, PTS running time, DTS running time + buffers: Vec, +} + +struct Stream { + /// Sink pad for this stream. + sinkpad: super::FMP4MuxPad, + + /// Pre-queue for ONVIF variant to timestamp all buffers with their UTC time. + /// + /// In non-ONVIF mode this just collects the PTS/DTS and the corresponding running + /// times for later usage. + pre_queue: VecDeque, + + /// Currently configured caps for this stream. + caps: gst::Caps, + /// Whether this stream is intra-only and has frame reordering. 
+ delta_frames: DeltaFrames, + + /// Currently queued GOPs, including incomplete ones. + queued_gops: VecDeque, + /// Whether the fully queued GOPs are filling a whole fragment. + fragment_filled: bool, + /// Whether a whole chunk is queued. + chunk_filled: bool, + + /// Difference between the first DTS and 0 in case of negative DTS + dts_offset: Option, + + /// Current position (DTS, or PTS for intra-only) to prevent + /// timestamps from going backwards when queueing new buffers + current_position: gst::ClockTime, + + /// Mapping between running time and UTC time in ONVIF mode. + running_time_utc_time_mapping: Option<(gst::Signed, gst::ClockTime)>, +} + +#[derive(Default)] +struct State { + /// Currently configured streams. + streams: Vec, + + /// Stream header with ftyp and moov box. + /// + /// Created once we received caps and kept up to date with the caps, + /// sent as part of the buffer list for the first fragment. + stream_header: Option, + + /// Sequence number of the current fragment. + sequence_number: u32, + + /// Fragment tracking for mfra box + current_offset: u64, + fragment_offsets: Vec, + + /// Earliest PTS of the whole stream + earliest_pts: Option, + /// Current end PTS of the whole stream + end_pts: Option, + /// Start DTS of the whole stream + start_dts: Option, + + /// Start PTS of the current fragment + fragment_start_pts: Option, + /// Start PTS of the current chunk + /// + /// This is equal to `fragment_start_pts` if the current chunk is the first of a fragment, + /// and always equal to `fragment_start_pts` if no `chunk_duration` is set. + chunk_start_pts: Option, + /// Additional timeout delay in case GOPs are bigger than the fragment duration + timeout_delay: gst::ClockTime, + + /// If headers (ftyp / moov box) were sent. + sent_headers: bool, +} + +#[derive(Default)] +pub(crate) struct FMP4Mux { + state: Mutex, + settings: Mutex, +} + +impl FMP4Mux { + /// Checks if a buffer is valid according to the stream configuration. 
+ fn check_buffer( + buffer: &gst::BufferRef, + sinkpad: &super::FMP4MuxPad, + delta_frames: super::DeltaFrames, + ) -> Result<(), gst::FlowError> { + if delta_frames.requires_dts() && buffer.dts().is_none() { + gst::error!(CAT, obj: sinkpad, "Require DTS for video streams"); + return Err(gst::FlowError::Error); + } + + if buffer.pts().is_none() { + gst::error!(CAT, obj: sinkpad, "Require timestamped buffers"); + return Err(gst::FlowError::Error); + } + + if delta_frames.intra_only() && buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) { + gst::error!(CAT, obj: sinkpad, "Intra-only stream with delta units"); + return Err(gst::FlowError::Error); + } + + Ok(()) + } + + /// Peek the currently queued buffer on this stream. + /// + /// This also determines the PTS/DTS that is finally going to be used, including + /// timestamp conversion to the UTC times in ONVIF mode. + fn peek_buffer( + &self, + stream: &mut Stream, + fragment_duration: gst::ClockTime, + ) -> Result, gst::FlowError> { + // If not in ONVIF mode or the mapping is already known and there is a pre-queued buffer + // then we can directly return it from here. 
+ if self.obj().class().as_ref().variant != super::Variant::ONVIF + || stream.running_time_utc_time_mapping.is_some() + { + if let Some(pre_queued_buffer) = stream.pre_queue.front() { + return Ok(Some(pre_queued_buffer.clone())); + } + } + + // Pop buffer here, it will be stored in the pre-queue after calculating its timestamps + let mut buffer = match stream.sinkpad.pop_buffer() { + None => return Ok(None), + Some(buffer) => buffer, + }; + + Self::check_buffer(&buffer, &stream.sinkpad, stream.delta_frames)?; + + let segment = match stream.sinkpad.segment().downcast::().ok() { + Some(segment) => segment, + None => { + gst::error!(CAT, obj: stream.sinkpad, "Got buffer before segment"); + return Err(gst::FlowError::Error); + } + }; + + let pts_position = buffer.pts().unwrap(); + let duration = buffer.duration(); + let end_pts_position = duration.opt_add(pts_position).unwrap_or(pts_position); + + let pts = segment + .to_running_time_full(pts_position) + .ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Couldn't convert PTS to running time"); + gst::FlowError::Error + })? + .positive() + .unwrap_or_else(|| { + gst::warning!(CAT, obj: stream.sinkpad, "Negative PTSs are not supported"); + gst::ClockTime::ZERO + }); + + let end_pts = segment + .to_running_time_full(end_pts_position) + .ok_or_else(|| { + gst::error!( + CAT, + obj: stream.sinkpad, + "Couldn't convert end PTS to running time" + ); + gst::FlowError::Error + })? + .positive() + .unwrap_or_else(|| { + gst::warning!(CAT, obj: stream.sinkpad, "Negative PTSs are not supported"); + gst::ClockTime::ZERO + }); + + let (dts, end_dts) = if !stream.delta_frames.requires_dts() { + (None, None) + } else { + // Negative DTS are handled via the dts_offset and by having negative composition time + // offsets in the `trun` box. The smallest DTS here is shifted to zero. 
+ let dts_position = buffer.dts().expect("not DTS"); + let end_dts_position = duration.opt_add(dts_position).unwrap_or(dts_position); + + let dts = segment.to_running_time_full(dts_position).ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Couldn't convert DTS to running time"); + gst::FlowError::Error + })?; + + let end_dts = segment + .to_running_time_full(end_dts_position) + .ok_or_else(|| { + gst::error!( + CAT, + obj: stream.sinkpad, + "Couldn't convert end DTS to running time" + ); + gst::FlowError::Error + })?; + + let end_dts = std::cmp::max(end_dts, dts); + + (Some(dts), Some(end_dts)) + }; + + // If this is a multi-stream element then we need to update the PTS/DTS positions according + // to the output segment, specifically to re-timestamp them with the running time and + // adjust for the segment shift to compensate for negative DTS. + if !self.obj().class().as_ref().variant.is_single_stream() { + let pts_position = pts + SEGMENT_OFFSET; + let dts_position = dts.map(|dts| { + dts.checked_add_unsigned(SEGMENT_OFFSET) + .and_then(|dts| dts.positive()) + .unwrap_or(gst::ClockTime::ZERO) + }); + + let buffer = buffer.make_mut(); + buffer.set_pts(pts_position); + buffer.set_dts(dts_position); + } + + if self.obj().class().as_ref().variant != super::Variant::ONVIF { + // Store in the queue so we don't have to recalculate this all the time + stream.pre_queue.push_back(PreQueuedBuffer { + buffer, + pts, + end_pts, + dts, + end_dts, + }); + } else if let Some(running_time_utc_time_mapping) = stream.running_time_utc_time_mapping { + // For ONVIF we need to re-timestamp the buffer with its UTC time. + // + // After re-timestamping, put the buffer into the pre-queue so re-timestamping only has to + // happen once. 
+ let utc_time = match get_utc_time_from_buffer(&buffer) { + None => { + // Calculate from the mapping + running_time_to_utc_time(pts, running_time_utc_time_mapping).ok_or_else( + || { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative PTS UTC time"); + gst::FlowError::Error + }, + )? + } + Some(utc_time) => utc_time, + }; + gst::trace!( + CAT, + obj: stream.sinkpad, + "Mapped PTS running time {pts} to UTC time {utc_time}" + ); + + let end_pts_utc_time = + running_time_to_utc_time(end_pts, (pts, utc_time)).ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end PTS UTC time"); + gst::FlowError::Error + })?; + + let (dts_utc_time, end_dts_utc_time) = if let Some(dts) = dts { + let dts_utc_time = + running_time_to_utc_time(dts, (pts, utc_time)).ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time"); + gst::FlowError::Error + })?; + gst::trace!( + CAT, + obj: stream.sinkpad, + "Mapped DTS running time {dts} to UTC time {dts_utc_time}" + ); + + let end_dts_utc_time = running_time_to_utc_time(end_dts.unwrap(), (pts, utc_time)) + .ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end DTS UTC time"); + gst::FlowError::Error + })?; + + ( + Some(gst::Signed::Positive(dts_utc_time)), + Some(gst::Signed::Positive(end_dts_utc_time)), + ) + } else { + (None, None) + }; + + stream.pre_queue.push_back(PreQueuedBuffer { + buffer, + pts: utc_time, + end_pts: end_pts_utc_time, + dts: dts_utc_time, + end_dts: end_dts_utc_time, + }); + } else { + // In ONVIF mode we need to get UTC times for each buffer and synchronize based on that. + // Queue up to min(6s, fragment_duration) of data in the very beginning to get the first UTC time and then backdate. 
+ if let Some((last, first)) = + Option::zip(stream.pre_queue.back(), stream.pre_queue.front()) + { + // Existence of PTS/DTS checked below + let (last, first) = if stream.delta_frames.requires_dts() { + (last.end_dts.unwrap(), first.end_dts.unwrap()) + } else { + ( + gst::Signed::Positive(last.end_pts), + gst::Signed::Positive(first.end_pts), + ) + }; + + let limit = std::cmp::min(gst::ClockTime::from_seconds(6), fragment_duration); + if last.saturating_sub(first) > gst::Signed::Positive(limit) { + gst::error!( + CAT, + obj: stream.sinkpad, + "Got no UTC time in the first {limit} of the stream" + ); + return Err(gst::FlowError::Error); + } + } + + let utc_time = match get_utc_time_from_buffer(&buffer) { + Some(utc_time) => utc_time, + None => { + stream.pre_queue.push_back(PreQueuedBuffer { + buffer, + pts, + end_pts, + dts, + end_dts, + }); + return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA); + } + }; + + let mapping = (gst::Signed::Positive(pts), utc_time); + stream.running_time_utc_time_mapping = Some(mapping); + + // Push the buffer onto the pre-queue and re-timestamp it and all other buffers + // based on the mapping above once we have an UTC time. 
+ stream.pre_queue.push_back(PreQueuedBuffer { + buffer, + pts, + end_pts, + dts, + end_dts, + }); + + for pre_queued_buffer in stream.pre_queue.iter_mut() { + let pts_utc_time = running_time_to_utc_time(pre_queued_buffer.pts, mapping) + .ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative PTS UTC time"); + gst::FlowError::Error + })?; + gst::trace!( + CAT, + obj: stream.sinkpad, + "Mapped PTS running time {} to UTC time {pts_utc_time}", + pre_queued_buffer.pts, + ); + pre_queued_buffer.pts = pts_utc_time; + + let end_pts_utc_time = running_time_to_utc_time(pre_queued_buffer.end_pts, mapping) + .ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end PTS UTC time"); + gst::FlowError::Error + })?; + pre_queued_buffer.end_pts = end_pts_utc_time; + + if let Some(dts) = pre_queued_buffer.dts { + let dts_utc_time = running_time_to_utc_time(dts, mapping).ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time"); + gst::FlowError::Error + })?; + gst::trace!( + CAT, + obj: stream.sinkpad, + "Mapped DTS running time {dts} to UTC time {dts_utc_time}" + ); + pre_queued_buffer.dts = Some(gst::Signed::Positive(dts_utc_time)); + + let end_dts_utc_time = running_time_to_utc_time( + pre_queued_buffer.end_dts.unwrap(), + mapping, + ) + .ok_or_else(|| { + gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time"); + gst::FlowError::Error + })?; + pre_queued_buffer.end_dts = Some(gst::Signed::Positive(end_dts_utc_time)); + } + } + + // Fall through and return the front of the queue + } + + Ok(Some(stream.pre_queue.front().unwrap().clone())) + } + + /// Pop the currently queued buffer from this stream. + fn pop_buffer(&self, stream: &mut Stream) -> PreQueuedBuffer { + // Only allowed to be called after peek was successful so there must be a buffer now + // or in ONVIF mode we must also know the mapping now. 
+ + assert!(!stream.pre_queue.is_empty()); + if self.obj().class().as_ref().variant == super::Variant::ONVIF { + assert!(stream.running_time_utc_time_mapping.is_some()); + } + + stream.pre_queue.pop_front().unwrap() + } + + /// Finds the stream that has the earliest buffer queued. + fn find_earliest_stream<'a>( + &self, + state: &'a mut State, + timeout: bool, + fragment_duration: gst::ClockTime, + ) -> Result, gst::FlowError> { + if state + .streams + .iter() + .all(|s| s.fragment_filled || s.chunk_filled) + { + gst::trace!( + CAT, + imp: self, + "All streams are currently filled and have to be drained" + ); + return Ok(None); + } + + let mut earliest_stream = None; + let mut all_have_data_or_eos = true; + + for stream in state.streams.iter_mut() { + let pre_queued_buffer = match Self::peek_buffer(self, stream, fragment_duration) { + Ok(Some(buffer)) => buffer, + Ok(None) | Err(gst_base::AGGREGATOR_FLOW_NEED_DATA) => { + if stream.sinkpad.is_eos() { + gst::trace!(CAT, obj: stream.sinkpad, "Stream is EOS"); + } else { + all_have_data_or_eos = false; + gst::trace!(CAT, obj: stream.sinkpad, "Stream has no buffer"); + } + continue; + } + Err(err) => return Err(err), + }; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "Stream has running time PTS {} / DTS {} queued", + pre_queued_buffer.pts, + pre_queued_buffer.dts.display(), + ); + + let running_time = if stream.delta_frames.requires_dts() { + pre_queued_buffer.dts.unwrap() + } else { + gst::Signed::Positive(pre_queued_buffer.pts) + }; + + if earliest_stream + .as_ref() + .map_or(true, |(_stream, earliest_running_time)| { + *earliest_running_time > running_time + }) + { + earliest_stream = Some((stream, running_time)); + } + } + + if !timeout && !all_have_data_or_eos { + gst::trace!( + CAT, + imp: self, + "No timeout and not all streams have a buffer or are EOS" + ); + Ok(None) + } else if let Some((stream, earliest_running_time)) = earliest_stream { + gst::trace!( + CAT, + imp: self, + "Stream {} is earliest stream 
with running time {}", + stream.sinkpad.name(), + earliest_running_time + ); + Ok(Some(stream)) + } else { + gst::trace!(CAT, imp: self, "No streams have data queued currently"); + Ok(None) + } + } + + /// Queue incoming buffer as individual GOPs. + fn queue_gops( + &self, + stream: &mut Stream, + mut pre_queued_buffer: PreQueuedBuffer, + ) -> Result<(), gst::FlowError> { + gst::trace!(CAT, obj: stream.sinkpad, "Handling buffer {:?}", pre_queued_buffer); + + let delta_frames = stream.delta_frames; + + // Enforce monotonically increasing PTS for intra-only streams, and DTS otherwise + if !delta_frames.requires_dts() { + if pre_queued_buffer.pts < stream.current_position { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Decreasing PTS {} < {}", + pre_queued_buffer.pts, + stream.current_position, + ); + pre_queued_buffer.pts = stream.current_position; + } else { + stream.current_position = pre_queued_buffer.pts; + } + pre_queued_buffer.end_pts = + std::cmp::max(pre_queued_buffer.end_pts, pre_queued_buffer.pts); + } else { + // Negative DTS are handled via the dts_offset and by having negative composition time + // offsets in the `trun` box. The smallest DTS here is shifted to zero. 
+ let dts = match pre_queued_buffer.dts.unwrap() { + gst::Signed::Positive(dts) => { + if let Some(dts_offset) = stream.dts_offset { + dts + dts_offset + } else { + dts + } + } + gst::Signed::Negative(dts) => { + if stream.dts_offset.is_none() { + stream.dts_offset = Some(dts); + } + + let dts_offset = stream.dts_offset.unwrap(); + if dts > dts_offset { + gst::warning!(CAT, obj: stream.sinkpad, "DTS before first DTS"); + gst::ClockTime::ZERO + } else { + dts_offset - dts + } + } + }; + + let end_dts = match pre_queued_buffer.end_dts.unwrap() { + gst::Signed::Positive(dts) => { + if let Some(dts_offset) = stream.dts_offset { + dts + dts_offset + } else { + dts + } + } + gst::Signed::Negative(dts) => { + let dts_offset = stream.dts_offset.unwrap(); + if dts > dts_offset { + gst::warning!(CAT, obj: stream.sinkpad, "End DTS before first DTS"); + gst::ClockTime::ZERO + } else { + dts_offset - dts + } + } + }; + + // Enforce monotonically increasing DTS for intra-only streams + // NOTE: PTS stays the same so this will cause a bigger PTS/DTS difference + // FIXME: Is this correct? 
+ if dts < stream.current_position { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Decreasing DTS {} < {}", + dts, + stream.current_position, + ); + pre_queued_buffer.dts = Some(gst::Signed::Positive(stream.current_position)); + } else { + pre_queued_buffer.dts = Some(gst::Signed::Positive(dts)); + stream.current_position = dts; + } + pre_queued_buffer.end_dts = Some(gst::Signed::Positive(std::cmp::max(end_dts, dts))); + } + + let PreQueuedBuffer { + buffer, + pts, + end_pts, + dts, + end_dts, + } = pre_queued_buffer; + + let dts = dts.map(|v| v.positive().unwrap()); + let end_dts = end_dts.map(|v| v.positive().unwrap()); + + let pts_position = buffer.pts().unwrap(); + + if !buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Starting new GOP at PTS {} DTS {} (DTS offset {})", + pts, + dts.display(), + stream.dts_offset.display(), + ); + + let gop = Gop { + start_pts: pts, + start_dts: dts, + earliest_pts: pts, + earliest_pts_position: pts_position, + final_earliest_pts: !delta_frames.requires_dts(), + end_pts, + end_dts, + final_end_pts: false, + buffers: vec![GopBuffer { + buffer, + pts, + pts_position, + dts, + }], + }; + stream.queued_gops.push_front(gop); + + if let Some(prev_gop) = stream.queued_gops.get_mut(1) { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Updating previous GOP starting at PTS {} to end PTS {} DTS {}", + prev_gop.earliest_pts, + pts, + dts.display(), + ); + + prev_gop.end_pts = std::cmp::max(prev_gop.end_pts, pts); + prev_gop.end_dts = std::cmp::max(prev_gop.end_dts, dts); + + if !delta_frames.requires_dts() { + prev_gop.final_end_pts = true; + } + + if !prev_gop.final_earliest_pts { + // Don't bother logging this for intra-only streams as it would be for every + // single buffer. 
+ if delta_frames.requires_dts() { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Previous GOP has final earliest PTS at {}", + prev_gop.earliest_pts + ); + } + + prev_gop.final_earliest_pts = true; + if let Some(prev_prev_gop) = stream.queued_gops.get_mut(2) { + prev_prev_gop.final_end_pts = true; + } + } + } + } else if let Some(gop) = stream.queued_gops.front_mut() { + assert!(!delta_frames.intra_only()); + + gop.end_pts = std::cmp::max(gop.end_pts, end_pts); + gop.end_dts = gop.end_dts.opt_max(end_dts); + gop.buffers.push(GopBuffer { + buffer, + pts, + pts_position, + dts, + }); + + if delta_frames.requires_dts() { + let dts = dts.unwrap(); + + if gop.earliest_pts > pts && !gop.final_earliest_pts { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Updating current GOP earliest PTS from {} to {}", + gop.earliest_pts, + pts + ); + gop.earliest_pts = pts; + gop.earliest_pts_position = pts_position; + + if let Some(prev_gop) = stream.queued_gops.get_mut(1) { + if prev_gop.end_pts < pts { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Updating previous GOP starting PTS {} end time from {} to {}", + pts, + prev_gop.end_pts, + pts + ); + prev_gop.end_pts = pts; + } + } + } + + let gop = stream.queued_gops.front_mut().unwrap(); + + // The earliest PTS is known when the current DTS is bigger or equal to the first + // PTS that was observed in this GOP. If there was another frame later that had a + // lower PTS then it wouldn't be possible to display it in time anymore, i.e. the + // stream would be invalid. 
+ if gop.start_pts <= dts && !gop.final_earliest_pts { + gst::debug!( + CAT, + obj: stream.sinkpad, + "GOP has final earliest PTS at {}", + gop.earliest_pts + ); + gop.final_earliest_pts = true; + + if let Some(prev_gop) = stream.queued_gops.get_mut(1) { + prev_gop.final_end_pts = true; + } + } + } + } else { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Waiting for keyframe at the beginning of the stream" + ); + } + + if let Some((prev_gop, first_gop)) = Option::zip( + stream.queued_gops.iter().find(|gop| gop.final_end_pts), + stream.queued_gops.back(), + ) { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Queued full GOPs duration updated to {}", + prev_gop.end_pts.saturating_sub(first_gop.earliest_pts), + ); + } + + gst::debug!( + CAT, + obj: stream.sinkpad, + "Queued duration updated to {}", + Option::zip(stream.queued_gops.front(), stream.queued_gops.back()) + .map(|(end, start)| end.end_pts.saturating_sub(start.start_pts)) + .unwrap_or(gst::ClockTime::ZERO) + ); + + Ok(()) + } + + /// Queue buffers from all streams that are not filled for the current fragment yet + fn queue_available_buffers( + &self, + state: &mut State, + settings: &Settings, + timeout: bool, + ) -> Result<(), gst::FlowError> { + let fragment_start_pts = state.fragment_start_pts; + let chunk_start_pts = state.chunk_start_pts; + + // Always take a buffer from the stream with the earliest queued buffer to keep the + // fill-level at all sinkpads in sync. + while let Some(stream) = + self.find_earliest_stream(state, timeout, settings.fragment_duration)? + { + let pre_queued_buffer = Self::pop_buffer(self, stream); + + // Queue up the buffer and update GOP tracking state + self.queue_gops(stream, pre_queued_buffer)?; + + // Check if this stream is filled enough now. + self.check_stream_filled(settings, stream, fragment_start_pts, chunk_start_pts, false); + } + + Ok(()) + } + + /// Check if the stream is filled enough for the current chunk / fragment. 
+ fn check_stream_filled( + &self, + settings: &Settings, + stream: &mut Stream, + fragment_start_pts: Option, + chunk_start_pts: Option, + all_eos: bool, + ) { + // Either both are none or neither + let (chunk_start_pts, fragment_start_pts) = match (chunk_start_pts, fragment_start_pts) { + (Some(chunk_start_pts), Some(fragment_start_pts)) => { + (chunk_start_pts, fragment_start_pts) + } + _ => return, + }; + + // Check if this stream is filled enough now. + if let Some(chunk_duration) = settings.chunk_duration { + // In chunk mode + gst::trace!( + CAT, + obj: stream.sinkpad, + "Current chunk start {}, current fragment start {}", + chunk_start_pts, + fragment_start_pts, + ); + + let chunk_end_pts = chunk_start_pts + chunk_duration; + let fragment_end_pts = fragment_start_pts + settings.fragment_duration; + + if fragment_end_pts < chunk_end_pts { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Current chunk end {}, current fragment end {}. Fragment end before chunk end, extending fragment", + chunk_end_pts, + fragment_end_pts, + ); + } else { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Current chunk end {}, current fragment end {}", + chunk_end_pts, + fragment_end_pts, + ); + } + + // First check if the next split should be the end of a fragment or the end of a chunk. + // If both are the same then a fragment split has preference. + if fragment_end_pts <= chunk_end_pts { + // If the first GOP already starts after the fragment end PTS then this stream is + // filled in the sense that it will not have any buffers for this chunk. + if let Some(gop) = stream.queued_gops.back() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {} start PTS {}, GOP end PTS {}", + stream.queued_gops.len() - 1, + gop.start_pts, + gop.end_pts, + ); + // If this GOP starts after the end of the current fragment, i.e. is not + // included at all, then consider this stream filled as it won't contribute to + // this fragment. 
+ // + // However if the first buffer of the GOP is not actually a keyframe then we + // previously drained a partial GOP because the GOP is ending too far after the + // planned fragment end. + if gop.start_pts > fragment_end_pts + && !gop.buffers.first().map_or(false, |b| { + b.buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) + }) + { + gst::debug!(CAT, obj: stream.sinkpad, "Stream's first GOP starting after this fragment"); + stream.fragment_filled = true; + return; + } + } + + // We can only finish a fragment if a full GOP with final end PTS is queued and it + // ends at or after the fragment end PTS. + if let Some((gop_idx, gop)) = stream + .queued_gops + .iter() + .enumerate() + .find(|(_idx, gop)| gop.final_end_pts || all_eos || stream.sinkpad.is_eos()) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {gop_idx} start PTS {}, GOP end PTS {}", + gop.start_pts, + gop.end_pts, + ); + if gop.end_pts >= fragment_end_pts { + gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for finishing this fragment"); + stream.fragment_filled = true; + return; + } + } + } + + if !stream.fragment_filled { + // If the first GOP already starts after the chunk end PTS then this stream is + // filled in the sense that it will not have any buffers for this chunk. + if let Some(gop) = stream.queued_gops.back() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {} start PTS {}, GOP end PTS {}", + stream.queued_gops.len() - 1, + gop.start_pts, + gop.end_pts, + ); + if gop.start_pts > chunk_end_pts { + gst::debug!(CAT, obj: stream.sinkpad, "Stream's first GOP starting after this chunk"); + stream.chunk_filled = true; + return; + } + } + + // We can only finish a chunk if a full GOP with final end PTS is queued and it + // ends at or after the fragment end PTS. 
+ let (gop_idx, gop) = match stream.queued_gops.iter().enumerate().find( + |(_idx, gop)| gop.final_earliest_pts || all_eos || stream.sinkpad.is_eos(), + ) { + Some(res) => res, + None => { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Chunked mode and want to finish fragment but no GOP with final end PTS known yet", + ); + return; + } + }; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {gop_idx} start PTS {}, GOP end PTS {} (final {})", + gop.start_pts, + gop.end_pts, + gop.final_end_pts || all_eos || stream.sinkpad.is_eos(), + ); + let last_pts = gop.buffers.last().map(|b| b.pts); + + if gop.end_pts >= chunk_end_pts + // only if there's another GOP or at least one further buffer + && (gop_idx > 0 + || last_pts.map_or(false, |last_pts| last_pts.saturating_sub(chunk_start_pts) > chunk_duration)) + { + gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for this chunk"); + stream.chunk_filled = true; + } + } + } else { + // Check if the end of the latest finalized GOP is after the fragment end + let fragment_end_pts = fragment_start_pts + settings.fragment_duration; + gst::trace!( + CAT, + obj: stream.sinkpad, + "Current fragment start {}, current fragment end {}", + fragment_start_pts, + fragment_start_pts + settings.fragment_duration, + ); + + // If the first GOP already starts after the fragment end PTS then this stream is + // filled in the sense that it will not have any buffers for this fragment. 
+ if let Some(gop) = stream.queued_gops.back() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {} start PTS {}, GOP end PTS {}", + stream.queued_gops.len() - 1, + gop.start_pts, + gop.end_pts, + ); + if gop.start_pts > fragment_end_pts { + gst::debug!(CAT, obj: stream.sinkpad, "Stream's first GOP starting after this fragment"); + stream.fragment_filled = true; + return; + } + } + + let (gop_idx, gop) = match stream + .queued_gops + .iter() + .enumerate() + .find(|(_gop_idx, gop)| gop.final_end_pts || all_eos || stream.sinkpad.is_eos()) + { + Some(gop) => gop, + None => { + gst::trace!(CAT, obj: stream.sinkpad, "Fragment mode but no GOP with final end PTS known yet"); + return; + } + }; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP {gop_idx} start PTS {}, GOP end PTS {}", + gop.start_pts, + gop.end_pts, + ); + + if gop.end_pts >= fragment_end_pts { + gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for this fragment"); + stream.fragment_filled = true; + } + } + } + + /// Calculate earliest PTS, i.e. PTS of the very first fragment. + /// + /// This also sends a force-keyunit event for the start of the second fragment. + fn calculate_earliest_pts( + &self, + settings: &Settings, + state: &mut State, + upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>, + all_eos: bool, + timeout: bool, + ) { + if state.earliest_pts.is_some() { + return; + } + + // Calculate the earliest PTS after queueing input if we can now. 
+ let mut earliest_pts = None; + let mut start_dts = None; + for stream in &state.streams { + let (stream_earliest_pts, stream_start_dts) = match stream.queued_gops.back() { + None => { + if !all_eos && !timeout { + earliest_pts = None; + start_dts = None; + break; + } + continue; + } + Some(oldest_gop) => { + if !all_eos && !timeout && !oldest_gop.final_earliest_pts { + earliest_pts = None; + start_dts = None; + break; + } + + (oldest_gop.earliest_pts, oldest_gop.start_dts) + } + }; + + if earliest_pts.opt_gt(stream_earliest_pts).unwrap_or(true) { + earliest_pts = Some(stream_earliest_pts); + } + + if let Some(stream_start_dts) = stream_start_dts { + if start_dts.opt_gt(stream_start_dts).unwrap_or(true) { + start_dts = Some(stream_start_dts); + } + } + } + + let earliest_pts = match earliest_pts { + Some(earliest_pts) => earliest_pts, + None => return, + }; + + // The earliest PTS is known and as such the start of the first and second fragment. + gst::info!( + CAT, + imp: self, + "Got earliest PTS {}, start DTS {} (timeout: {timeout}, all eos: {all_eos})", + earliest_pts, + start_dts.display() + ); + state.earliest_pts = Some(earliest_pts); + state.start_dts = start_dts; + state.fragment_start_pts = Some(earliest_pts); + state.chunk_start_pts = Some(earliest_pts); + + // Now send force-keyunit events for the second fragment start. 
+ let fku_time = earliest_pts + settings.fragment_duration; + for stream in &state.streams { + let current_position = stream.current_position; + + // In case of ONVIF this needs to be converted back from UTC time to + // the stream's running time + let (fku_time, current_position) = + if self.obj().class().as_ref().variant == super::Variant::ONVIF { + ( + if let Some(fku_time) = utc_time_to_running_time( + fku_time, + stream.running_time_utc_time_mapping.unwrap(), + ) { + fku_time + } else { + continue; + }, + utc_time_to_running_time( + current_position, + stream.running_time_utc_time_mapping.unwrap(), + ), + ) + } else { + (fku_time, Some(current_position)) + }; + + let fku_time = + if current_position.map_or(false, |current_position| current_position > fku_time) { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Sending first force-keyunit event late for running time {} at {}", + fku_time, + current_position.display(), + ); + None + } else { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Sending first force-keyunit event for running time {}", + fku_time, + ); + Some(fku_time) + }; + + let fku = gst_video::UpstreamForceKeyUnitEvent::builder() + .running_time(fku_time) + .all_headers(true) + .build(); + + upstream_events.push((stream.sinkpad.clone(), fku)); + } + + let fragment_start_pts = state.fragment_start_pts; + let chunk_start_pts = state.chunk_start_pts; + + // Check if any of the streams are already filled enough for the first chunk/fragment. + for stream in &mut state.streams { + self.check_stream_filled( + settings, + stream, + fragment_start_pts, + chunk_start_pts, + all_eos, + ); + } + } + + /// Drain buffers from a single stream. 
+ #[allow(clippy::too_many_arguments)] + fn drain_buffers_one_stream( + &self, + settings: &Settings, + stream: &mut Stream, + timeout: bool, + all_eos: bool, + fragment_start_pts: gst::ClockTime, + chunk_start_pts: gst::ClockTime, + chunk_end_pts: Option, + fragment_start: bool, + fragment_filled: bool, + ) -> Result, gst::FlowError> { + assert!( + timeout + || all_eos + || stream.sinkpad.is_eos() + || stream.queued_gops.get(1).map(|gop| gop.final_earliest_pts) == Some(true) + || settings.chunk_duration.is_some() + ); + + let mut gops = Vec::with_capacity(stream.queued_gops.len()); + if stream.queued_gops.is_empty() { + return Ok(gops); + } + + // For the first stream drain as much as necessary and decide the end of this + // fragment or chunk, for all other streams drain up to that position. + + if let Some(chunk_duration) = settings.chunk_duration { + // Chunk mode + + let dequeue_end_pts = if let Some(chunk_end_pts) = chunk_end_pts { + // Not the first stream + chunk_end_pts + } else if fragment_filled { + // Fragment is filled, so only dequeue everything until the latest GOP + fragment_start_pts + settings.fragment_duration + } else { + // Fragment is not filled and we either have a full chunk or timeout + chunk_start_pts + chunk_duration + }; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "Draining from {} up to end PTS {} / duration {}", + chunk_start_pts, + dequeue_end_pts, + dequeue_end_pts.saturating_sub(chunk_start_pts), + ); + + while let Some(gop) = stream.queued_gops.back() { + // If this should be the last chunk of a fragment then only drain every + // finished GOP until the chunk end PTS. If there is no finished GOP for + // this stream (it would be not the first stream then), then drain + // everything up to the chunk end PTS. + // + // If this chunk is not the last chunk of a fragment then simply dequeue + // everything up to the chunk end PTS. 
+ if fragment_filled { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Fragment filled, current GOP start {} end {} (final {})", + gop.start_pts, gop.end_pts, + gop.final_end_pts || all_eos || stream.sinkpad.is_eos() + ); + + // If we have a final GOP then include it as long as it's either + // - ending before the dequeue end PTS + // - no GOPs were dequeued yet and this is the first stream + // + // The second case would happen if no GOP ends between the last chunk of the + // fragment and the fragment duration. + if (gop.final_end_pts || all_eos || stream.sinkpad.is_eos()) + && (gop.end_pts <= dequeue_end_pts + || (gops.is_empty() && chunk_end_pts.is_none())) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Pushing whole GOP", + ); + gops.push(stream.queued_gops.pop_back().unwrap()); + continue; + } + if !gops.is_empty() { + break; + } + + // Otherwise if this is the first stream, no full GOP is queued and the first + // GOP is starting inside this fragment then we need to wait for more data. + // + // If this is not the first stream then take an incomplete GOP. 
+ if gop.start_pts >= dequeue_end_pts + || (!gop.final_earliest_pts && !all_eos && !stream.sinkpad.is_eos()) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP starts after fragment end", + ); + break; + } else if chunk_end_pts.is_none() { + gst::info!(CAT, obj: stream.sinkpad, "Don't have a full GOP at the end of a fragment for the first stream"); + return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA); + } else { + gst::info!(CAT, obj: stream.sinkpad, "Including incomplete GOP"); + } + } else { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Chunk filled, current GOP start {} end {} (final {})", + gop.start_pts, gop.end_pts, + gop.final_end_pts || all_eos || stream.sinkpad.is_eos() + ); + } + + if gop.end_pts <= dequeue_end_pts + && (gop.final_end_pts || all_eos || stream.sinkpad.is_eos()) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Pushing whole GOP", + ); + gops.push(stream.queued_gops.pop_back().unwrap()); + } else if gop.start_pts >= dequeue_end_pts + || (!gop.final_earliest_pts && !all_eos && !stream.sinkpad.is_eos()) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "GOP starts after chunk end", + ); + break; + } else { + let gop = stream.queued_gops.back_mut().unwrap(); + + let start_pts = gop.start_pts; + let start_dts = gop.start_dts; + let earliest_pts = gop.earliest_pts; + let earliest_pts_position = gop.earliest_pts_position; + + let mut split_index = None; + + for (idx, buffer) in gop.buffers.iter().enumerate() { + if buffer.pts >= dequeue_end_pts { + break; + } + split_index = Some(idx); + } + let split_index = match split_index { + Some(split_index) => split_index, + None => { + // We have B frames and the first buffer of this GOP is too far + // in the future. + gst::trace!( + CAT, + obj: stream.sinkpad, + "First buffer of GOP too far in the future", + ); + break; + } + }; + + // The last buffer of the GOP starts before the chunk end but ends + // after the end. We still take it here and remove the whole GOP. 
+ if split_index == gop.buffers.len() - 1 { + if gop.final_end_pts || all_eos || stream.sinkpad.is_eos() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Pushing whole GOP", + ); + gops.push(stream.queued_gops.pop_back().unwrap()); + } else { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Can't push whole GOP as it's not final yet", + ); + } + break; + } + + let mut buffers = mem::take(&mut gop.buffers); + // Contains all buffers from `split_index + 1` to the end + gop.buffers = buffers.split_off(split_index + 1); + + gop.start_pts = gop.buffers[0].pts; + gop.start_dts = gop.buffers[0].dts; + gop.earliest_pts_position = gop.buffers[0].pts_position; + gop.earliest_pts = gop.buffers[0].pts; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "Splitting GOP and keeping PTS {}", + gop.buffers[0].pts, + ); + + let queue_gop = Gop { + start_pts, + start_dts, + earliest_pts, + final_earliest_pts: true, + end_pts: gop.start_pts, + final_end_pts: true, + end_dts: gop.start_dts, + earliest_pts_position, + buffers, + }; + + gops.push(queue_gop); + break; + } + } + + if fragment_start { + if let Some(first_buffer) = gops.first().and_then(|gop| gop.buffers.first()) { + if first_buffer + .buffer + .flags() + .contains(gst::BufferFlags::DELTA_UNIT) + { + gst::error!(CAT, obj: stream.sinkpad, "First buffer of a new fragment is not a keyframe"); + } + } + } + } else { + // Non-chunk mode + + let dequeue_end_pts = if let Some(chunk_end_pts) = chunk_end_pts { + // Not the first stream + chunk_end_pts + } else { + fragment_start_pts + settings.fragment_duration + }; + + gst::trace!( + CAT, + obj: stream.sinkpad, + "Draining from {} up to end PTS {} / duration {}", + chunk_start_pts, + dequeue_end_pts, + dequeue_end_pts.saturating_sub(chunk_start_pts), + ); + + while let Some(gop) = stream.queued_gops.back() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Current GOP start {} end {} (final {})", + gop.start_pts, gop.end_pts, + gop.final_end_pts || all_eos || stream.sinkpad.is_eos() + 
); + + // If this GOP is not complete then we can't pop it yet. + // + // If there was no complete GOP at all yet then it might be bigger than the + // fragment duration. In this case we might not be able to handle the latency + // requirements in a live pipeline. + if !gop.final_end_pts && !all_eos && !stream.sinkpad.is_eos() { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Not including GOP without final end PTS", + ); + break; + } + + // If this GOP starts after the fragment end then don't dequeue it yet unless this is + // the first stream and no GOPs were dequeued at all yet. This would mean that the + // GOP is bigger than the fragment duration. + if !all_eos + && gop.end_pts > dequeue_end_pts + && (chunk_end_pts.is_some() || !gops.is_empty()) + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Not including GOP yet", + ); + break; + } + + gst::trace!( + CAT, + obj: stream.sinkpad, + "Pushing complete GOP", + ); + gops.push(stream.queued_gops.pop_back().unwrap()); + } + } + + Ok(gops) + } + + /// Flatten all GOPs, remove any gaps and calculate durations. + #[allow(clippy::type_complexity)] + fn flatten_gops( + &self, + idx: usize, + stream: &Stream, + gops: Vec, + ) -> Result< + Option<( + // All buffers of the GOPs without gaps + VecDeque, + // Earliest PTS + gst::ClockTime, + // Earliest PTS position + gst::ClockTime, + // End PTS + gst::ClockTime, + // Start DTS + Option, + // Start DTS position + Option, + // End DTS + Option, + )>, + gst::FlowError, + > { + let last_gop = gops.last().unwrap(); + let end_pts = last_gop.end_pts; + let end_dts = last_gop.end_dts; + + let mut gop_buffers = Vec::with_capacity(gops.iter().map(|g| g.buffers.len()).sum()); + gop_buffers.extend(gops.into_iter().flat_map(|gop| gop.buffers.into_iter())); + + // Then calculate durations for all of the buffers and get rid of any GAP buffers in + // the process. + // Also calculate the earliest PTS / start DTS here, which needs to consider GAP + // buffers too. 
+ let mut buffers = VecDeque::with_capacity(gop_buffers.len()); + let mut earliest_pts = None; + let mut earliest_pts_position = None; + let mut start_dts = None; + let mut start_dts_position = None; + + let mut gop_buffers = gop_buffers.into_iter(); + while let Some(buffer) = gop_buffers.next() { + // If this is a GAP buffer then skip it. Its duration was already considered + // below for the non-GAP buffer preceding it, and if there was none then the + // chunk start would be adjusted accordingly for this stream. + if buffer.buffer.flags().contains(gst::BufferFlags::GAP) + && buffer.buffer.flags().contains(gst::BufferFlags::DROPPABLE) + && buffer.buffer.size() == 0 + { + gst::trace!( + CAT, + obj: stream.sinkpad, + "Skipping gap buffer {buffer:?}", + ); + continue; + } + + if earliest_pts.map_or(true, |earliest_pts| buffer.pts < earliest_pts) { + earliest_pts = Some(buffer.pts); + } + if earliest_pts_position.map_or(true, |earliest_pts_position| { + buffer.buffer.pts().unwrap() < earliest_pts_position + }) { + earliest_pts_position = Some(buffer.buffer.pts().unwrap()); + } + if stream.delta_frames.requires_dts() && start_dts.is_none() { + start_dts = Some(buffer.dts.unwrap()); + } + if stream.delta_frames.requires_dts() && start_dts_position.is_none() { + start_dts_position = Some(buffer.buffer.dts().unwrap()); + } + + let timestamp = if !stream.delta_frames.requires_dts() { + buffer.pts + } else { + buffer.dts.unwrap() + }; + + // Take as end timestamp the timestamp of the next non-GAP buffer + let end_timestamp = match gop_buffers.as_slice().iter().find(|buf| { + !buf.buffer.flags().contains(gst::BufferFlags::GAP) + || !buf.buffer.flags().contains(gst::BufferFlags::DROPPABLE) + || buf.buffer.size() != 0 + }) { + Some(buffer) => { + if !stream.delta_frames.requires_dts() { + buffer.pts + } else { + buffer.dts.unwrap() + } + } + None => { + if !stream.delta_frames.requires_dts() { + end_pts + } else { + end_dts.unwrap() + } + } + }; + + // Timestamps are enforced 
to monotonically increase when queueing buffers + let duration = end_timestamp + .checked_sub(timestamp) + .expect("Timestamps going backwards"); + + let composition_time_offset = if !stream.delta_frames.requires_dts() { + None + } else { + let pts = buffer.pts; + let dts = buffer.dts.unwrap(); + + Some( + i64::try_from( + (gst::Signed::Positive(pts) - gst::Signed::Positive(dts)).nseconds(), + ) + .map_err(|_| { + gst::error!(CAT, obj: stream.sinkpad, "Too big PTS/DTS difference"); + gst::FlowError::Error + })?, + ) + }; + + buffers.push_back(Buffer { + idx, + buffer: buffer.buffer, + timestamp, + duration, + composition_time_offset, + }); + } + + if buffers.is_empty() { + return Ok(None); + } + + let earliest_pts = earliest_pts.unwrap(); + let earliest_pts_position = earliest_pts_position.unwrap(); + if stream.delta_frames.requires_dts() { + assert!(start_dts.is_some()); + assert!(start_dts_position.is_some()); + } + let start_dts = start_dts; + let start_dts_position = start_dts_position; + + Ok(Some(( + buffers, + earliest_pts, + earliest_pts_position, + end_pts, + start_dts, + start_dts_position, + end_dts, + ))) + } + + /// Drain buffers from all streams for the current chunk. + /// + /// Also removes gap buffers, calculates buffer durations and various timestamps relevant for + /// the current chunk. + #[allow(clippy::type_complexity)] + fn drain_buffers( + &self, + state: &mut State, + settings: &Settings, + timeout: bool, + all_eos: bool, + ) -> Result< + ( + // Drained streams + Vec<(super::FragmentHeaderStream, VecDeque)>, + // Minimum earliest PTS position of all streams + Option, + // Minimum earliest PTS of all streams + Option, + // Minimum start DTS position of all streams (if any stream has DTS) + Option, + // End PTS of this drained fragment or chunk, i.e. 
start PTS of the next fragment or + // chunk + Option, + // With these drained buffers the current fragment is filled + bool, + // These buffers make the start of a new fragment + bool, + ), + gst::FlowError, + > { + let mut drained_streams = Vec::with_capacity(state.streams.len()); + + let mut min_earliest_pts_position = None; + let mut min_earliest_pts = None; + let mut min_start_dts_position = None; + let mut chunk_end_pts = None; + + let fragment_start_pts = state.fragment_start_pts.unwrap(); + let chunk_start_pts = state.chunk_start_pts.unwrap(); + let fragment_start = fragment_start_pts == chunk_start_pts; + + // In fragment mode, each chunk is a full fragment. Otherwise, in chunk mode, + // this fragment is filled if it is filled for the first non-EOS stream + // that has a GOP inside this chunk + let fragment_filled = settings.chunk_duration.is_none() + || state + .streams + .iter() + .find(|s| { + !s.sinkpad.is_eos() + && s.queued_gops.back().map_or(false, |gop| { + gop.start_pts <= fragment_start_pts + settings.fragment_duration + // In chunk mode we might've drained a partial GOP as a chunk after + // the fragment end if the keyframe came too late. The GOP now + // starts with a non-keyframe after the fragment end but is part of + // the fragment: the fragment is extended after the end. Allow this + // situation here. + || gop.buffers.first().map_or(false, |b| { + b.buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) + }) + }) + }) + .map(|s| s.fragment_filled) + == Some(true); + + // The first stream decides how much can be dequeued, if anything at all. + // + // In chunk mode: + // If more than the fragment duration has passed until the latest GOPs earliest PTS then + // the fragment is considered filled and all GOPs until that GOP are drained. The next + // chunk would start a new fragment, and would start with the keyframe at the beginning + // of that latest GOP. 
+ // + // Otherwise if more than a chunk duration is currently queued in GOPs of which the + // earliest PTS is known then drain everything up to that position. If nothing can be + // drained at all then advance the timeout by 1s until something can be dequeued. + // + // Otherwise: + // All complete GOPs (or at EOS everything) up to the fragment duration will be dequeued + // but on timeout in live pipelines it might happen that the first stream does not have a + // complete GOP queued. In that case nothing is dequeued for any of the streams and the + // timeout is advanced by 1s until at least one complete GOP can be dequeued. + // + // If the first stream is already EOS then the next stream that is not EOS yet will be + // taken in its place. + gst::info!( + CAT, + imp: self, + "Starting to drain at {} (fragment start {}, fragment end {}, chunk start {}, chunk end {})", + chunk_start_pts, + fragment_start_pts, + fragment_start_pts + settings.fragment_duration, + chunk_start_pts.display(), + settings.chunk_duration.map(|duration| chunk_start_pts + duration).display(), + ); + + for (idx, stream) in state.streams.iter_mut().enumerate() { + let stream_settings = stream.sinkpad.imp().settings.lock().unwrap().clone(); + + let gops = self.drain_buffers_one_stream( + settings, + stream, + timeout, + all_eos, + fragment_start_pts, + chunk_start_pts, + chunk_end_pts, + fragment_start, + fragment_filled, + )?; + stream.fragment_filled = false; + stream.chunk_filled = false; + + // If we don't have a next chunk start PTS then this is the first stream as above. 
+ if chunk_end_pts.is_none() { + if let Some(last_gop) = gops.last() { + // Dequeued something so let's take the end PTS of the last GOP + chunk_end_pts = Some(last_gop.end_pts); + gst::info!( + CAT, + obj: stream.sinkpad, + "Draining up to PTS {} for this chunk", + last_gop.end_pts, + ); + } else { + // If nothing was dequeued for the first stream then this is OK if we're at + // EOS or this stream simply has only buffers after this chunk: we just + // consider the next stream as first stream then. + let stream_after_chunk = stream.queued_gops.back().map_or(false, |gop| { + gop.start_pts + >= if fragment_filled { + fragment_start_pts + settings.fragment_duration + } else { + chunk_start_pts + settings.chunk_duration.unwrap() + } + }); + if all_eos || stream.sinkpad.is_eos() || stream_after_chunk { + // This is handled below generally if nothing was dequeued + } else { + if settings.chunk_duration.is_some() { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Don't have anything to drain for the first stream on timeout in a live pipeline", + ); + } else { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Don't have a complete GOP for the first stream on timeout in a live pipeline", + ); + } + + // In this case we advance the timeout by 1s and hope that things are + // better then. 
+ return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA); + } + } + } else if all_eos { + if let Some(last_gop) = gops.last() { + if chunk_end_pts.map_or(true, |chunk_end_pts| chunk_end_pts < last_gop.end_pts) + { + chunk_end_pts = Some(last_gop.end_pts); + } + } + } + + if gops.is_empty() { + gst::info!( + CAT, + obj: stream.sinkpad, + "Draining no buffers", + ); + + drained_streams.push(( + super::FragmentHeaderStream { + caps: stream.caps.clone(), + start_time: None, + delta_frames: stream.delta_frames, + trak_timescale: stream_settings.trak_timescale, + }, + VecDeque::new(), + )); + + continue; + } + + assert!(chunk_end_pts.is_some()); + + if let Some((prev_gop, first_gop)) = Option::zip( + stream.queued_gops.iter().find(|gop| gop.final_end_pts), + stream.queued_gops.back(), + ) { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Queued full GOPs duration updated to {}", + prev_gop.end_pts.saturating_sub(first_gop.earliest_pts), + ); + } + + gst::debug!( + CAT, + obj: stream.sinkpad, + "Queued duration updated to {}", + Option::zip(stream.queued_gops.front(), stream.queued_gops.back()) + .map(|(end, start)| end.end_pts.saturating_sub(start.start_pts)) + .unwrap_or(gst::ClockTime::ZERO) + ); + + // First flatten all GOPs into a single `Vec` + let buffers = self.flatten_gops(idx, stream, gops)?; + let ( + buffers, + earliest_pts, + earliest_pts_position, + end_pts, + start_dts, + start_dts_position, + _end_dts, + ) = match buffers { + Some(res) => res, + None => { + gst::info!( + CAT, + obj: stream.sinkpad, + "Drained only gap buffers", + ); + + drained_streams.push(( + super::FragmentHeaderStream { + caps: stream.caps.clone(), + start_time: None, + delta_frames: stream.delta_frames, + trak_timescale: stream_settings.trak_timescale, + }, + VecDeque::new(), + )); + + continue; + } + }; + + gst::info!( + CAT, + obj: stream.sinkpad, + "Draining {} worth of buffers starting at PTS {} DTS {}, DTS offset {}", + end_pts.saturating_sub(earliest_pts), + earliest_pts, + 
start_dts.display(), + stream.dts_offset.display(), + ); + + let start_time = if !stream.delta_frames.requires_dts() { + earliest_pts + } else { + start_dts.unwrap() + }; + + if min_earliest_pts.opt_gt(earliest_pts).unwrap_or(true) { + min_earliest_pts = Some(earliest_pts); + } + if min_earliest_pts_position + .opt_gt(earliest_pts_position) + .unwrap_or(true) + { + min_earliest_pts_position = Some(earliest_pts_position); + } + if let Some(start_dts_position) = start_dts_position { + if min_start_dts_position + .opt_gt(start_dts_position) + .unwrap_or(true) + { + min_start_dts_position = Some(start_dts_position); + } + } + + drained_streams.push(( + super::FragmentHeaderStream { + caps: stream.caps.clone(), + start_time: Some(start_time), + delta_frames: stream.delta_frames, + trak_timescale: stream_settings.trak_timescale, + }, + buffers, + )); + } + + Ok(( + drained_streams, + min_earliest_pts_position, + min_earliest_pts, + min_start_dts_position, + chunk_end_pts, + fragment_filled, + fragment_start, + )) + } + + /// Interleave drained buffers of each stream for this chunk according to the settings. 
+ #[allow(clippy::type_complexity)] + fn interleave_buffers( + &self, + settings: &Settings, + mut drained_streams: Vec<(super::FragmentHeaderStream, VecDeque)>, + ) -> Result<(Vec, Vec), gst::FlowError> { + let mut interleaved_buffers = + Vec::with_capacity(drained_streams.iter().map(|(_, bufs)| bufs.len()).sum()); + while let Some((_idx, (_, bufs))) = + drained_streams + .iter_mut() + .enumerate() + .min_by(|(a_idx, (_, a)), (b_idx, (_, b))| { + let (a, b) = match (a.front(), b.front()) { + (None, None) => return std::cmp::Ordering::Equal, + (None, _) => return std::cmp::Ordering::Greater, + (_, None) => return std::cmp::Ordering::Less, + (Some(a), Some(b)) => (a, b), + }; + + match a.timestamp.cmp(&b.timestamp) { + std::cmp::Ordering::Equal => a_idx.cmp(b_idx), + cmp => cmp, + } + }) + { + let start_time = match bufs.front() { + None => { + // No more buffers now + break; + } + Some(buf) => buf.timestamp, + }; + let mut current_end_time = start_time; + let mut dequeued_bytes = 0; + + while settings + .interleave_bytes + .opt_ge(dequeued_bytes) + .unwrap_or(true) + && settings + .interleave_time + .opt_ge(current_end_time.saturating_sub(start_time)) + .unwrap_or(true) + { + if let Some(buffer) = bufs.pop_front() { + current_end_time = buffer.timestamp + buffer.duration; + dequeued_bytes += buffer.buffer.size() as u64; + interleaved_buffers.push(buffer); + } else { + // No buffers left in this stream, go to next stream + break; + } + } + } + + // All buffers should be consumed now + assert!(drained_streams.iter().all(|(_, bufs)| bufs.is_empty())); + + let streams = drained_streams + .into_iter() + .map(|(stream, _)| stream) + .collect::>(); + + Ok((interleaved_buffers, streams)) + } + + /// Request a force-keyunit event for the start of the next fragment. + /// + /// This is called whenever the last chunk of a fragment is pushed out. 
+ /// + /// `chunk_end_pts` gives the time of the previously drained chunk, which + /// ideally should be lower than the next fragment starts PTS. + fn request_force_keyunit_event( + &self, + state: &State, + settings: &Settings, + upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>, + chunk_end_pts: gst::ClockTime, + ) { + let fku_time = chunk_end_pts + settings.fragment_duration; + + for stream in &state.streams { + let current_position = stream.current_position; + + // In case of ONVIF this needs to be converted back from UTC time to + // the stream's running time + let (fku_time, current_position) = + if self.obj().class().as_ref().variant == super::Variant::ONVIF { + ( + if let Some(fku_time) = utc_time_to_running_time( + fku_time, + stream.running_time_utc_time_mapping.unwrap(), + ) { + fku_time + } else { + continue; + }, + utc_time_to_running_time( + current_position, + stream.running_time_utc_time_mapping.unwrap(), + ), + ) + } else { + (fku_time, Some(current_position)) + }; + + let fku_time = + if current_position.map_or(false, |current_position| current_position > fku_time) { + gst::warning!( + CAT, + obj: stream.sinkpad, + "Sending force-keyunit event late for running time {} at {}", + fku_time, + current_position.display(), + ); + None + } else { + gst::debug!( + CAT, + obj: stream.sinkpad, + "Sending force-keyunit event for running time {}", + fku_time, + ); + Some(fku_time) + }; + + let fku = gst_video::UpstreamForceKeyUnitEvent::builder() + .running_time(fku_time) + .all_headers(true) + .build(); + + upstream_events.push((stream.sinkpad.clone(), fku)); + } + } + + /// Fills upstream events as needed and returns the caps the first time draining can happen. + /// + /// If it returns `(_, None)` then there's currently nothing to drain anymore. 
+ fn drain_one_chunk( + &self, + state: &mut State, + settings: &Settings, + timeout: bool, + at_eos: bool, + upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>, + ) -> Result<(Option, Option), gst::FlowError> { + if at_eos { + gst::info!(CAT, imp: self, "Draining at EOS"); + } else if timeout { + gst::info!(CAT, imp: self, "Draining at timeout"); + } else { + for stream in &state.streams { + if !stream.chunk_filled && !stream.fragment_filled && !stream.sinkpad.is_eos() { + return Ok((None, None)); + } + } + gst::info!( + CAT, + imp: self, + "Draining because all streams have enough data queued" + ); + } + + // Collect all buffers and their timing information that are to be drained right now. + let ( + drained_streams, + min_earliest_pts_position, + min_earliest_pts, + min_start_dts_position, + chunk_end_pts, + fragment_filled, + fragment_start, + ) = self.drain_buffers(state, settings, timeout, at_eos)?; + + // Create header now if it was not created before and return the caps + let mut caps = None; + if state.stream_header.is_none() { + let (_, new_caps) = self.update_header(state, settings, false)?.unwrap(); + caps = Some(new_caps); + } + + // Interleave buffers according to the settings into a single vec + let (mut interleaved_buffers, mut streams) = + self.interleave_buffers(settings, drained_streams)?; + + // Offset stream start time to start at 0 in ONVIF mode, or if 'offset-to-zero' is enabled, + // instead of using the UTC time verbatim. This would be used for the tfdt box later. + // FIXME: Should this use the original DTS-or-PTS running time instead? + // That might be negative though! 
+ if self.obj().class().as_ref().variant == super::Variant::ONVIF || settings.offset_to_zero { + let offset = if let Some(start_dts) = state.start_dts { + std::cmp::min(start_dts, state.earliest_pts.unwrap()) + } else { + state.earliest_pts.unwrap() + }; + for stream in &mut streams { + if let Some(start_time) = stream.start_time { + stream.start_time = Some(start_time.checked_sub(offset).unwrap()); + } + } + } + + if interleaved_buffers.is_empty() { + assert!(at_eos); + return Ok((caps, None)); + } + + // If there are actual buffers to output then create headers as needed and create a + // bufferlist for all buffers that have to be output. + let min_earliest_pts_position = min_earliest_pts_position.unwrap(); + let min_earliest_pts = min_earliest_pts.unwrap(); + let chunk_end_pts = chunk_end_pts.unwrap(); + + gst::debug!( + CAT, + imp: self, + concat!( + "Draining chunk (fragment start: {} fragment end: {}) ", + "from PTS {} to {}" + ), + fragment_start, + fragment_filled, + min_earliest_pts, + chunk_end_pts, + ); + + let mut fmp4_header = None; + if !state.sent_headers { + let mut buffer = state.stream_header.as_ref().unwrap().copy(); + { + let buffer = buffer.get_mut().unwrap(); + + buffer.set_pts(min_earliest_pts_position); + buffer.set_dts(min_start_dts_position); + + // Header is DISCONT|HEADER + buffer.set_flags(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER); + } + + fmp4_header = Some(buffer); + + state.sent_headers = true; + } + + // TODO: Write prft boxes before moof + // TODO: Write sidx boxes before moof and rewrite once offsets are known + + // First sequence number must be 1 + if state.sequence_number == 0 { + state.sequence_number = 1; + } + let sequence_number = state.sequence_number; + // If this is the last chunk of a fragment then increment the sequence number for the + // start of the next fragment. 
+ if fragment_filled { + state.sequence_number += 1; + } + let (mut fmp4_fragment_header, moof_offset) = + boxes::create_fmp4_fragment_header(super::FragmentHeaderConfiguration { + variant: self.obj().class().as_ref().variant, + sequence_number, + chunk: !fragment_start, + streams: streams.as_slice(), + buffers: interleaved_buffers.as_slice(), + }) + .map_err(|err| { + gst::error!( + CAT, + imp: self, + "Failed to create FMP4 fragment header: {}", + err + ); + gst::FlowError::Error + })?; + + { + let buffer = fmp4_fragment_header.get_mut().unwrap(); + buffer.set_pts(min_earliest_pts_position); + buffer.set_dts(min_start_dts_position); + buffer.set_duration(chunk_end_pts.checked_sub(min_earliest_pts)); + + // Fragment and chunk header is HEADER + buffer.set_flags(gst::BufferFlags::HEADER); + // Chunk header is DELTA_UNIT + if !fragment_start { + buffer.set_flags(gst::BufferFlags::DELTA_UNIT); + } + + // Copy metas from the first actual buffer to the fragment header. This allows + // getting things like the reference timestamp meta or the timecode meta to identify + // the fragment. 
+ let _ = interleaved_buffers[0].buffer.copy_into( + buffer, + gst::BufferCopyFlags::META, + 0, + None, + ); + } + + let moof_offset = state.current_offset + + fmp4_header.as_ref().map(|h| h.size()).unwrap_or(0) as u64 + + moof_offset; + + let buffers_len = interleaved_buffers.len(); + for (idx, buffer) in interleaved_buffers.iter_mut().enumerate() { + // Fix up buffer flags, all other buffers are DELTA_UNIT + let buffer_ref = buffer.buffer.make_mut(); + buffer_ref.unset_flags(gst::BufferFlags::all()); + buffer_ref.set_flags(gst::BufferFlags::DELTA_UNIT); + + // Set the marker flag for the last buffer of the segment + if idx == buffers_len - 1 { + buffer_ref.set_flags(gst::BufferFlags::MARKER); + } + } + + let buffer_list = fmp4_header + .into_iter() + .chain(Some(fmp4_fragment_header)) + .chain(interleaved_buffers.into_iter().map(|buffer| buffer.buffer)) + .inspect(|b| { + state.current_offset += b.size() as u64; + }) + .collect::(); + + if settings.write_mfra && fragment_start { + // Write mfra only for the main stream on fragment starts, and if there are no + // buffers for the main stream in this segment then don't write anything. + if let Some(super::FragmentHeaderStream { + start_time: Some(start_time), + .. + }) = streams.get(0) + { + state.fragment_offsets.push(super::FragmentOffset { + time: *start_time, + offset: moof_offset, + }); + } + } + + state.end_pts = Some(chunk_end_pts); + + // Update for the start PTS of the next fragment / chunk + + if fragment_filled { + state.fragment_start_pts = Some(chunk_end_pts); + gst::info!(CAT, imp: self, "Starting new fragment at {}", chunk_end_pts,); + } else { + gst::info!(CAT, imp: self, "Starting new chunk at {}", chunk_end_pts,); + } + state.chunk_start_pts = Some(chunk_end_pts); + + // If the current fragment is filled we already have the next fragment's start + // keyframe and can request the following one. 
+ if fragment_filled { + self.request_force_keyunit_event(state, settings, upstream_events, chunk_end_pts); + } + + // Reset timeout delay now that we've output an actual fragment or chunk + state.timeout_delay = gst::ClockTime::ZERO; + + // TODO: Write edit list at EOS + // TODO: Rewrite bitrates at EOS + + Ok((caps, Some(buffer_list))) + } + + /// Drain all chunks that can currently be drained. + /// + /// On error the `caps`, `buffers` or `upstream_events` can contain data of already finished + /// chunks that were complete before the error. + #[allow(clippy::too_many_arguments)] + fn drain( + &self, + state: &mut State, + settings: &Settings, + all_eos: bool, + mut timeout: bool, + caps: &mut Option, + buffers: &mut Vec, + upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>, + ) -> Result<(), gst::FlowError> { + // Loop as long as new chunks can be drained. + loop { + // If enough GOPs were queued, drain and create the output fragment or chunk + let res = self.drain_one_chunk(state, settings, timeout, all_eos, upstream_events); + let mut buffer_list = match res { + Ok((new_caps, buffer_list)) => { + if caps.is_none() { + *caps = new_caps; + } + + buffer_list + } + Err(err) => { + if err == gst_base::AGGREGATOR_FLOW_NEED_DATA { + assert!(!all_eos); + gst::debug!(CAT, imp: self, "Need more data"); + state.timeout_delay += 1.seconds(); + } + + return Err(err); + } + }; + + // If nothing can't be drained anymore then break the loop, and if all streams are + // EOS add the footers. 
+ if buffer_list.is_none() { + if settings.write_mfra && all_eos { + gst::debug!(CAT, imp: self, "Writing mfra box"); + match boxes::create_mfra(&state.streams[0].caps, &state.fragment_offsets) { + Ok(mut mfra) => { + { + let mfra = mfra.get_mut().unwrap(); + // mfra is DELTA_UNIT like other buffers + mfra.set_flags(gst::BufferFlags::DELTA_UNIT); + } + + if buffer_list.is_none() { + buffer_list = Some(gst::BufferList::new_sized(1)); + } + buffer_list.as_mut().unwrap().get_mut().unwrap().add(mfra); + buffers.extend(buffer_list); + } + Err(err) => { + gst::error!(CAT, imp: self, "Failed to create mfra box: {}", err); + } + } + } + + break Ok(()); + } + + // Otherwise extend the list of bufferlists and check again if something can be + // drained. + buffers.extend(buffer_list); + + // Only the first iteration is considered a timeout. + timeout = false; + + let fragment_start_pts = state.fragment_start_pts; + let chunk_start_pts = state.chunk_start_pts; + for stream in &mut state.streams { + // Check if this stream is still filled enough now. + self.check_stream_filled( + settings, + stream, + fragment_start_pts, + chunk_start_pts, + all_eos, + ); + } + + // And try draining a fragment again + } + } + + /// Create all streams. 
+ fn create_streams(&self, state: &mut State) -> Result<(), gst::FlowError> { + for pad in self + .obj() + .sink_pads() + .into_iter() + .map(|pad| pad.downcast::().unwrap()) + { + let caps = match pad.current_caps() { + Some(caps) => caps, + None => { + gst::warning!(CAT, obj: pad, "Skipping pad without caps"); + continue; + } + }; + + gst::info!(CAT, obj: pad, "Configuring caps {:?}", caps); + + let s = caps.structure(0).unwrap(); + + let mut delta_frames = DeltaFrames::IntraOnly; + match s.name().as_str() { + "video/x-h264" | "video/x-h265" => { + if !s.has_field_with_type("codec_data", gst::Buffer::static_type()) { + gst::error!(CAT, obj: pad, "Received caps without codec_data"); + return Err(gst::FlowError::NotNegotiated); + } + delta_frames = DeltaFrames::Bidirectional; + } + "video/x-vp8" => { + delta_frames = DeltaFrames::PredictiveOnly; + } + "video/x-vp9" => { + if !s.has_field_with_type("colorimetry", str::static_type()) { + gst::error!(CAT, obj: pad, "Received caps without colorimetry"); + return Err(gst::FlowError::NotNegotiated); + } + delta_frames = DeltaFrames::PredictiveOnly; + } + "video/x-av1" => { + delta_frames = DeltaFrames::PredictiveOnly; + } + "image/jpeg" => (), + "audio/mpeg" => { + if !s.has_field_with_type("codec_data", gst::Buffer::static_type()) { + gst::error!(CAT, obj: pad, "Received caps without codec_data"); + return Err(gst::FlowError::NotNegotiated); + } + } + "audio/x-opus" => { + if let Some(header) = s + .get::("streamheader") + .ok() + .and_then(|a| a.get(0).and_then(|v| v.get::().ok())) + { + if gst_pbutils::codec_utils_opus_parse_header(&header, None).is_err() { + gst::error!(CAT, obj: pad, "Received invalid Opus header"); + return Err(gst::FlowError::NotNegotiated); + } + } else if gst_pbutils::codec_utils_opus_parse_caps(&caps, None).is_err() { + gst::error!(CAT, obj: pad, "Received invalid Opus caps"); + return Err(gst::FlowError::NotNegotiated); + } + } + "audio/x-alaw" | "audio/x-mulaw" => (), + "audio/x-adpcm" => (), 
+ "application/x-onvif-metadata" => (), + _ => unreachable!(), + } + + state.streams.push(Stream { + sinkpad: pad, + caps, + delta_frames, + pre_queue: VecDeque::new(), + queued_gops: VecDeque::new(), + fragment_filled: false, + chunk_filled: false, + dts_offset: None, + current_position: gst::ClockTime::ZERO, + running_time_utc_time_mapping: None, + }); + } + + if state.streams.is_empty() { + gst::error!(CAT, imp: self, "No streams available"); + return Err(gst::FlowError::Error); + } + + // Sort video streams first and then audio streams and then metadata streams, and each group by pad name. + state.streams.sort_by(|a, b| { + let order_of_caps = |caps: &gst::CapsRef| { + let s = caps.structure(0).unwrap(); + + if s.name().starts_with("video/") { + 0 + } else if s.name().starts_with("audio/") { + 1 + } else if s.name().starts_with("application/x-onvif-metadata") { + 2 + } else { + unimplemented!(); + } + }; + + let st_a = order_of_caps(&a.caps); + let st_b = order_of_caps(&b.caps); + + if st_a == st_b { + return a.sinkpad.name().cmp(&b.sinkpad.name()); + } + + st_a.cmp(&st_b) + }); + + Ok(()) + } + + /// Generate an updated header at the end and the corresponding caps with the new streamheader. 
+ fn update_header( + &self, + state: &mut State, + settings: &Settings, + at_eos: bool, + ) -> Result, gst::FlowError> { + let aggregator = self.obj(); + let class = aggregator.class(); + let variant = class.as_ref().variant; + + if settings.header_update_mode == super::HeaderUpdateMode::None && at_eos { + return Ok(None); + } + + assert!(!at_eos || state.streams.iter().all(|s| s.queued_gops.is_empty())); + + let duration = state + .end_pts + .opt_checked_sub(state.earliest_pts) + .ok() + .flatten(); + + let streams = state + .streams + .iter() + .map(|s| super::HeaderStream { + trak_timescale: s.sinkpad.imp().settings.lock().unwrap().trak_timescale, + delta_frames: s.delta_frames, + caps: s.caps.clone(), + }) + .collect::>(); + + let mut buffer = boxes::create_fmp4_header(super::HeaderConfiguration { + variant, + update: at_eos, + movie_timescale: settings.movie_timescale, + streams, + write_mehd: settings.write_mehd, + duration: if at_eos { duration } else { None }, + start_utc_time: if variant == super::Variant::ONVIF { + state + .earliest_pts + .map(|unix| unix.nseconds() / 100 + UNIX_1601_OFFSET * 10_000_000) + } else { + None + }, + }) + .map_err(|err| { + gst::error!(CAT, imp: self, "Failed to create FMP4 header: {}", err); + gst::FlowError::Error + })?; + + { + let buffer = buffer.get_mut().unwrap(); + + // No timestamps + + // Header is DISCONT|HEADER + buffer.set_flags(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER); + } + + // Remember stream header for later + state.stream_header = Some(buffer.clone()); + + let variant = match variant { + super::Variant::ISO | super::Variant::DASH | super::Variant::ONVIF => "iso-fragmented", + super::Variant::CMAF => "cmaf", + }; + let caps = gst::Caps::builder("video/quicktime") + .field("variant", variant) + .field("streamheader", gst::Array::new([&buffer])) + .build(); + + let mut list = gst::BufferList::new_sized(1); + { + let list = list.get_mut().unwrap(); + list.add(buffer); + } + + Ok(Some((list, caps))) 
+ } + + /// Finish the stream be rewriting / updating headers. + fn finish(&self, settings: &Settings) { + // Do remaining EOS handling after the end of the stream was pushed. + gst::debug!(CAT, imp: self, "Doing EOS handling"); + + if settings.header_update_mode == super::HeaderUpdateMode::None { + // Need to output new headers if started again after EOS + self.state.lock().unwrap().sent_headers = false; + return; + } + + let updated_header = self.update_header(&mut self.state.lock().unwrap(), settings, true); + match updated_header { + Ok(Some((buffer_list, caps))) => { + match settings.header_update_mode { + super::HeaderUpdateMode::None => unreachable!(), + super::HeaderUpdateMode::Rewrite => { + let mut q = gst::query::Seeking::new(gst::Format::Bytes); + if self.obj().src_pad().peer_query(&mut q) && q.result().0 { + let aggregator = self.obj(); + + aggregator.set_src_caps(&caps); + + // Seek to the beginning with a default bytes segment + aggregator.update_segment( + &gst::FormattedSegment::::new(), + ); + + if let Err(err) = aggregator.finish_buffer_list(buffer_list) { + gst::error!( + CAT, + imp: self, + "Failed pushing updated header buffer downstream: {:?}", + err, + ); + } + } else { + gst::error!( + CAT, + imp: self, + "Can't rewrite header because downstream is not seekable" + ); + } + } + super::HeaderUpdateMode::Update => { + let aggregator = self.obj(); + + aggregator.set_src_caps(&caps); + if let Err(err) = aggregator.finish_buffer_list(buffer_list) { + gst::error!( + CAT, + imp: self, + "Failed pushing updated header buffer downstream: {:?}", + err, + ); + } + } + } + } + Ok(None) => {} + Err(err) => { + gst::error!( + CAT, + imp: self, + "Failed to generate updated header: {:?}", + err + ); + } + } + + // Need to output new headers if started again after EOS + self.state.lock().unwrap().sent_headers = false; + } +} + +#[glib::object_subclass] +impl ObjectSubclass for FMP4Mux { + const NAME: &'static str = "GstFMP4Mux"; + type Type = 
super::FMP4Mux; + type ParentType = gst_base::Aggregator; + type Class = Class; +} + +impl ObjectImpl for FMP4Mux { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![ + glib::ParamSpecUInt64::builder("fragment-duration") + .nick("Fragment Duration") + .blurb("Duration for each FMP4 fragment in nanoseconds") + .default_value(DEFAULT_FRAGMENT_DURATION.nseconds()) + .mutable_ready() + .build(), + glib::ParamSpecUInt64::builder("chunk-duration") + .nick("Chunk Duration") + .blurb("Duration for each FMP4 chunk (default = no chunks)") + .default_value(u64::MAX) + .mutable_ready() + .build(), + glib::ParamSpecEnum::builder_with_default("header-update-mode", DEFAULT_HEADER_UPDATE_MODE) + .nick("Header update mode") + .blurb("Mode for updating the header at the end of the stream") + .mutable_ready() + .build(), + glib::ParamSpecBoolean::builder("write-mfra") + .nick("Write mfra box") + .blurb("Write fragment random access box at the end of the stream") + .default_value(DEFAULT_WRITE_MFRA) + .mutable_ready() + .build(), + glib::ParamSpecBoolean::builder("write-mehd") + .nick("Write mehd box") + .blurb("Write movie extends header box with the duration at the end of the stream (needs a header-update-mode enabled)") + .default_value(DEFAULT_WRITE_MFRA) + .mutable_ready() + .build(), + glib::ParamSpecUInt64::builder("interleave-bytes") + .nick("Interleave Bytes") + .blurb("Interleave between streams in bytes") + .default_value(DEFAULT_INTERLEAVE_BYTES.unwrap_or(0)) + .mutable_ready() + .build(), + glib::ParamSpecUInt64::builder("interleave-time") + .nick("Interleave Time") + .blurb("Interleave between streams in nanoseconds") + .default_value(DEFAULT_INTERLEAVE_TIME.map(gst::ClockTime::nseconds).unwrap_or(u64::MAX)) + .mutable_ready() + .build(), + glib::ParamSpecUInt::builder("movie-timescale") + .nick("Movie Timescale") + .blurb("Timescale to use for the movie (units per second, 0 is automatic)") + .mutable_ready() + .build(), 
+ ] + }); + + &PROPERTIES + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "fragment-duration" => { + let mut settings = self.settings.lock().unwrap(); + let fragment_duration = value.get().expect("type checked upstream"); + if settings.fragment_duration != fragment_duration { + settings.fragment_duration = fragment_duration; + let latency = settings + .chunk_duration + .unwrap_or(settings.fragment_duration); + drop(settings); + self.obj().set_latency(latency, None); + } + } + + "chunk-duration" => { + let mut settings = self.settings.lock().unwrap(); + let chunk_duration = value.get().expect("type checked upstream"); + if settings.chunk_duration != chunk_duration { + settings.chunk_duration = chunk_duration; + let latency = settings + .chunk_duration + .unwrap_or(settings.fragment_duration); + drop(settings); + self.obj().set_latency(latency, None); + } + } + + "header-update-mode" => { + let mut settings = self.settings.lock().unwrap(); + settings.header_update_mode = value.get().expect("type checked upstream"); + } + + "write-mfra" => { + let mut settings = self.settings.lock().unwrap(); + settings.write_mfra = value.get().expect("type checked upstream"); + } + + "write-mehd" => { + let mut settings = self.settings.lock().unwrap(); + settings.write_mehd = value.get().expect("type checked upstream"); + } + + "interleave-bytes" => { + let mut settings = self.settings.lock().unwrap(); + settings.interleave_bytes = match value.get().expect("type checked upstream") { + 0 => None, + v => Some(v), + }; + } + + "interleave-time" => { + let mut settings = self.settings.lock().unwrap(); + settings.interleave_time = match value.get().expect("type checked upstream") { + Some(gst::ClockTime::ZERO) | None => None, + v => v, + }; + } + + "movie-timescale" => { + let mut settings = self.settings.lock().unwrap(); + settings.movie_timescale = value.get().expect("type checked upstream"); + } + + _ => 
unimplemented!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "fragment-duration" => { + let settings = self.settings.lock().unwrap(); + settings.fragment_duration.to_value() + } + + "chunk-duration" => { + let settings = self.settings.lock().unwrap(); + settings.chunk_duration.to_value() + } + + "header-update-mode" => { + let settings = self.settings.lock().unwrap(); + settings.header_update_mode.to_value() + } + + "write-mfra" => { + let settings = self.settings.lock().unwrap(); + settings.write_mfra.to_value() + } + + "write-mehd" => { + let settings = self.settings.lock().unwrap(); + settings.write_mehd.to_value() + } + + "interleave-bytes" => { + let settings = self.settings.lock().unwrap(); + settings.interleave_bytes.unwrap_or(0).to_value() + } + + "interleave-time" => { + let settings = self.settings.lock().unwrap(); + settings.interleave_time.to_value() + } + + "movie-timescale" => { + let settings = self.settings.lock().unwrap(); + settings.movie_timescale.to_value() + } + + _ => unimplemented!(), + } + } + + fn constructed(&self) { + self.parent_constructed(); + + let obj = self.obj(); + let class = obj.class(); + for templ in class.pad_template_list().into_iter().filter(|templ| { + templ.presence() == gst::PadPresence::Always + && templ.direction() == gst::PadDirection::Sink + }) { + let sinkpad = gst::PadBuilder::::from_template(&templ) + .flags(gst::PadFlags::ACCEPT_INTERSECT) + .build(); + + obj.add_pad(&sinkpad).unwrap(); + } + + obj.set_latency(Settings::default().fragment_duration, None); + } +} + +impl GstObjectImpl for FMP4Mux {} + +impl ElementImpl for FMP4Mux { + fn request_new_pad( + &self, + templ: &gst::PadTemplate, + name: Option<&str>, + caps: Option<&gst::Caps>, + ) -> Option { + let state = self.state.lock().unwrap(); + if state.stream_header.is_some() { + gst::error!( + CAT, + imp: self, + "Can't request new pads after header was generated" + ); + return None; + } + + 
self.parent_request_new_pad(templ, name, caps) + } +} + +impl AggregatorImpl for FMP4Mux { + fn next_time(&self) -> Option { + let state = self.state.lock().unwrap(); + state.chunk_start_pts.opt_add(state.timeout_delay) + } + + fn sink_query( + &self, + aggregator_pad: &gst_base::AggregatorPad, + query: &mut gst::QueryRef, + ) -> bool { + use gst::QueryViewMut; + + gst::trace!(CAT, obj: aggregator_pad, "Handling query {:?}", query); + + match query.view_mut() { + QueryViewMut::Caps(q) => { + let mut allowed_caps = aggregator_pad + .current_caps() + .unwrap_or_else(|| aggregator_pad.pad_template_caps()); + + // Allow framerate change + for s in allowed_caps.make_mut().iter_mut() { + s.remove_field("framerate"); + } + + if let Some(filter_caps) = q.filter() { + let res = filter_caps + .intersect_with_mode(&allowed_caps, gst::CapsIntersectMode::First); + q.set_result(&res); + } else { + q.set_result(&allowed_caps); + } + + true + } + _ => self.parent_sink_query(aggregator_pad, query), + } + } + + fn sink_event_pre_queue( + &self, + aggregator_pad: &gst_base::AggregatorPad, + mut event: gst::Event, + ) -> Result { + use gst::EventView; + + gst::trace!(CAT, obj: aggregator_pad, "Handling event {:?}", event); + + match event.view() { + EventView::Segment(ev) => { + if ev.segment().format() != gst::Format::Time { + gst::warning!( + CAT, + obj: aggregator_pad, + "Received non-TIME segment, replacing with default TIME segment" + ); + let segment = gst::FormattedSegment::::new(); + event = gst::event::Segment::builder(&segment) + .seqnum(event.seqnum()) + .build(); + } + self.parent_sink_event_pre_queue(aggregator_pad, event) + } + _ => self.parent_sink_event_pre_queue(aggregator_pad, event), + } + } + + fn sink_event(&self, aggregator_pad: &gst_base::AggregatorPad, event: gst::Event) -> bool { + use gst::EventView; + + gst::trace!(CAT, obj: aggregator_pad, "Handling event {:?}", event); + + match event.view() { + EventView::Segment(ev) => { + // Already fixed-up above to 
always be a TIME segment + let segment = ev + .segment() + .clone() + .downcast::() + .expect("non-TIME segment"); + gst::info!(CAT, obj: aggregator_pad, "Received segment {:?}", segment); + + // Only forward the segment event verbatim if this is a single stream variant. + // Otherwise we have to produce a default segment and re-timestamp all buffers + // with their running time. + let aggregator = self.obj(); + let class = aggregator.class(); + if class.as_ref().variant.is_single_stream() { + aggregator.update_segment(&segment); + } + + self.parent_sink_event(aggregator_pad, event) + } + EventView::Tag(_ev) => { + // TODO: Maybe store for putting into the headers of the next fragment? + + self.parent_sink_event(aggregator_pad, event) + } + _ => self.parent_sink_event(aggregator_pad, event), + } + } + + fn src_query(&self, query: &mut gst::QueryRef) -> bool { + use gst::QueryViewMut; + + gst::trace!(CAT, imp: self, "Handling query {:?}", query); + + match query.view_mut() { + QueryViewMut::Seeking(q) => { + // We can't really handle seeking, it would break everything + q.set(false, gst::ClockTime::ZERO, gst::ClockTime::NONE); + true + } + _ => self.parent_src_query(query), + } + } + + fn src_event(&self, event: gst::Event) -> bool { + use gst::EventView; + + gst::trace!(CAT, imp: self, "Handling event {:?}", event); + + match event.view() { + EventView::Seek(_ev) => false, + _ => self.parent_src_event(event), + } + } + + fn flush(&self) -> Result { + let mut state = self.state.lock().unwrap(); + + for stream in &mut state.streams { + stream.queued_gops.clear(); + stream.dts_offset = None; + stream.current_position = gst::ClockTime::ZERO; + stream.fragment_filled = false; + stream.pre_queue.clear(); + stream.running_time_utc_time_mapping = None; + } + + state.current_offset = 0; + state.fragment_offsets.clear(); + + drop(state); + + self.parent_flush() + } + + fn stop(&self) -> Result<(), gst::ErrorMessage> { + gst::trace!(CAT, imp: self, "Stopping"); + + let _ = 
self.parent_stop(); + + *self.state.lock().unwrap() = State::default(); + + Ok(()) + } + + fn start(&self) -> Result<(), gst::ErrorMessage> { + gst::trace!(CAT, imp: self, "Starting"); + + self.parent_start()?; + + // For non-single-stream variants configure a default segment that allows for negative + // DTS so that we can correctly re-timestamp buffers with their running times. + let aggregator = self.obj(); + let class = aggregator.class(); + if !class.as_ref().variant.is_single_stream() { + let mut segment = gst::FormattedSegment::::new(); + segment.set_start(SEGMENT_OFFSET); + segment.set_position(SEGMENT_OFFSET); + aggregator.update_segment(&segment); + } + + *self.state.lock().unwrap() = State::default(); + + Ok(()) + } + + fn negotiate(&self) -> bool { + true + } + + fn aggregate(&self, timeout: bool) -> Result { + let settings = self.settings.lock().unwrap().clone(); + + let all_eos; + let mut caps = None; + let mut buffers = vec![]; + let mut upstream_events = vec![]; + let res = { + let mut state = self.state.lock().unwrap(); + + // Create streams + if state.streams.is_empty() { + self.create_streams(&mut state)?; + } + + self.queue_available_buffers(&mut state, &settings, timeout)?; + + all_eos = state.streams.iter().all(|stream| stream.sinkpad.is_eos()); + if all_eos { + gst::debug!(CAT, imp: self, "All streams are EOS now"); + + let fragment_start_pts = state.fragment_start_pts; + let chunk_start_pts = state.chunk_start_pts; + + for stream in &mut state.streams { + // Check if this stream is filled enough now that everything is EOS. + self.check_stream_filled( + &settings, + stream, + fragment_start_pts, + chunk_start_pts, + true, + ); + } + } + + // Calculate the earliest PTS, i.e. the start of the first fragment, if not known yet. 
+ self.calculate_earliest_pts( + &settings, + &mut state, + &mut upstream_events, + all_eos, + timeout, + ); + + // Drain everything that can be drained at this point + self.drain( + &mut state, + &settings, + all_eos, + timeout, + &mut caps, + &mut buffers, + &mut upstream_events, + ) + }; + + for (sinkpad, event) in upstream_events { + sinkpad.push_event(event); + } + + if let Some(caps) = caps { + gst::debug!(CAT, imp: self, "Setting caps on source pad: {:?}", caps); + self.obj().set_src_caps(&caps); + } + + for buffer_list in buffers { + gst::trace!(CAT, imp: self, "Pushing buffer list {:?}", buffer_list); + self.obj().finish_buffer_list(buffer_list)?; + } + + // If an error happened above while draining, return this now after pushing + // any output that was produced before the error. + res?; + + if !all_eos { + return Ok(gst::FlowSuccess::Ok); + } + + // Finish the stream. + self.finish(&settings); + + Err(gst::FlowError::Eos) + } +} + +#[repr(C)] +pub(crate) struct Class { + parent: gst_base::ffi::GstAggregatorClass, + variant: super::Variant, +} + +unsafe impl ClassStruct for Class { + type Type = FMP4Mux; +} + +impl std::ops::Deref for Class { + type Target = glib::Class; + + fn deref(&self) -> &Self::Target { + unsafe { &*(&self.parent as *const _ as *const _) } + } +} + +unsafe impl IsSubclassable for super::FMP4Mux { + fn class_init(class: &mut glib::Class) { + Self::parent_class_init::(class); + + let class = class.as_mut(); + class.variant = T::VARIANT; + } +} + +pub(crate) trait FMP4MuxImpl: AggregatorImpl { + const VARIANT: super::Variant; +} + +#[derive(Default)] +pub(crate) struct ISOFMP4Mux; + +#[glib::object_subclass] +impl ObjectSubclass for ISOFMP4Mux { + const NAME: &'static str = "GstISOFMP4Mux"; + type Type = super::ISOFMP4Mux; + type ParentType = super::FMP4Mux; +} + +impl ObjectImpl for ISOFMP4Mux { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + 
vec![glib::ParamSpecBoolean::builder("offset-to-zero") + .nick("Offset to Zero") + .blurb("Offsets all streams so that the earliest stream starts at 0") + .mutable_ready() + .build()] + }); + + &PROPERTIES + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + let obj = self.obj(); + let fmp4mux = obj.upcast_ref::().imp(); + + match pspec.name() { + "offset-to-zero" => { + let settings = fmp4mux.settings.lock().unwrap(); + settings.offset_to_zero.to_value() + } + + _ => unimplemented!(), + } + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + let obj = self.obj(); + let fmp4mux = obj.upcast_ref::().imp(); + + match pspec.name() { + "offset-to-zero" => { + let mut settings = fmp4mux.settings.lock().unwrap(); + settings.offset_to_zero = value.get().expect("type checked upstream"); + } + + _ => unimplemented!(), + } + } +} + +impl GstObjectImpl for ISOFMP4Mux {} + +impl ElementImpl for ISOFMP4Mux { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "ISOFMP4Mux", + "Codec/Muxer", + "ISO fragmented MP4 muxer", + "Sebastian Dröge ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let src_pad_template = gst::PadTemplate::new( + "src", + gst::PadDirection::Src, + gst::PadPresence::Always, + &gst::Caps::builder("video/quicktime") + .field("variant", "iso-fragmented") + .build(), + ) + .unwrap(); + + let sink_pad_template = gst::PadTemplate::with_gtype( + "sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &[ + gst::Structure::builder("video/x-h264") + .field("stream-format", gst::List::new(["avc", "avc3"])) + .field("alignment", "au") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + 
gst::Structure::builder("video/x-h265") + .field("stream-format", gst::List::new(["hvc1", "hev1"])) + .field("alignment", "au") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-vp8") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-vp9") + .field("profile", gst::List::new(["0", "1", "2", "3"])) + .field("chroma-format", gst::List::new(["4:2:0", "4:2:2", "4:4:4"])) + .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32])) + .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32])) + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-av1") + .field("stream-format", "obu-stream") + .field("alignment", "tu") + .field("profile", gst::List::new(["main", "high", "professional"])) + .field( + "chroma-format", + gst::List::new(["4:0:0", "4:2:0", "4:2:2", "4:4:4"]), + ) + .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32])) + .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32])) + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("audio/mpeg") + .field("mpegversion", 4i32) + .field("stream-format", "raw") + .field("channels", gst::IntRange::new(1, u16::MAX as i32)) + .field("rate", gst::IntRange::new(1, i32::MAX)) + .build(), + gst::Structure::builder("audio/x-opus") + .field("channel-mapping-family", gst::IntRange::new(0i32, 255)) + .field("channels", gst::IntRange::new(1i32, 8)) + .field("rate", gst::IntRange::new(1, i32::MAX)) + .build(), + ] + .into_iter() + .collect::(), + super::FMP4MuxPad::static_type(), + ) + .unwrap(); + + vec![src_pad_template, sink_pad_template] + }); + + PAD_TEMPLATES.as_ref() 
+ } +} + +impl AggregatorImpl for ISOFMP4Mux {} + +impl FMP4MuxImpl for ISOFMP4Mux { + const VARIANT: super::Variant = super::Variant::ISO; +} + +#[derive(Default)] +pub(crate) struct CMAFMux; + +#[glib::object_subclass] +impl ObjectSubclass for CMAFMux { + const NAME: &'static str = "GstCMAFMux"; + type Type = super::CMAFMux; + type ParentType = super::FMP4Mux; +} + +impl ObjectImpl for CMAFMux {} + +impl GstObjectImpl for CMAFMux {} + +impl ElementImpl for CMAFMux { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "CMAFMux", + "Codec/Muxer", + "CMAF fragmented MP4 muxer", + "Sebastian Dröge ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let src_pad_template = gst::PadTemplate::new( + "src", + gst::PadDirection::Src, + gst::PadPresence::Always, + &gst::Caps::builder("video/quicktime") + .field("variant", "cmaf") + .build(), + ) + .unwrap(); + + let sink_pad_template = gst::PadTemplate::with_gtype( + "sink", + gst::PadDirection::Sink, + gst::PadPresence::Always, + &[ + gst::Structure::builder("video/x-h264") + .field("stream-format", gst::List::new(["avc", "avc3"])) + .field("alignment", "au") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-h265") + .field("stream-format", gst::List::new(["hvc1", "hev1"])) + .field("alignment", "au") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("audio/mpeg") + .field("mpegversion", 4i32) + .field("stream-format", "raw") + .field("channels", gst::IntRange::new(1, u16::MAX as i32)) + .field("rate", gst::IntRange::new(1, i32::MAX)) + .build(), + ] + .into_iter() + .collect::(), + 
super::FMP4MuxPad::static_type(), + ) + .unwrap(); + + vec![src_pad_template, sink_pad_template] + }); + + PAD_TEMPLATES.as_ref() + } +} + +impl AggregatorImpl for CMAFMux {} + +impl FMP4MuxImpl for CMAFMux { + const VARIANT: super::Variant = super::Variant::CMAF; +} + +#[derive(Default)] +pub(crate) struct DASHMP4Mux; + +#[glib::object_subclass] +impl ObjectSubclass for DASHMP4Mux { + const NAME: &'static str = "GstDASHMP4Mux"; + type Type = super::DASHMP4Mux; + type ParentType = super::FMP4Mux; +} + +impl ObjectImpl for DASHMP4Mux {} + +impl GstObjectImpl for DASHMP4Mux {} + +impl ElementImpl for DASHMP4Mux { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "DASHMP4Mux", + "Codec/Muxer", + "DASH fragmented MP4 muxer", + "Sebastian Dröge ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let src_pad_template = gst::PadTemplate::new( + "src", + gst::PadDirection::Src, + gst::PadPresence::Always, + &gst::Caps::builder("video/quicktime") + .field("variant", "iso-fragmented") + .build(), + ) + .unwrap(); + + let sink_pad_template = gst::PadTemplate::with_gtype( + "sink", + gst::PadDirection::Sink, + gst::PadPresence::Always, + &[ + gst::Structure::builder("video/x-h264") + .field("stream-format", gst::List::new(["avc", "avc3"])) + .field("alignment", "au") + .field("width", gst::IntRange::::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-h265") + .field("stream-format", gst::List::new(["hvc1", "hev1"])) + .field("alignment", "au") + .field("width", gst::IntRange::::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-vp8") + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", 
gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-vp9") + .field("profile", gst::List::new(["0", "1", "2", "3"])) + .field("chroma-format", gst::List::new(["4:2:0", "4:2:2", "4:4:4"])) + .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32])) + .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32])) + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-av1") + .field("stream-format", "obu-stream") + .field("alignment", "tu") + .field("profile", gst::List::new(["main", "high", "professional"])) + .field( + "chroma-format", + gst::List::new(["4:0:0", "4:2:0", "4:2:2", "4:4:4"]), + ) + .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32])) + .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32])) + .field("width", gst::IntRange::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("audio/mpeg") + .field("mpegversion", 4i32) + .field("stream-format", "raw") + .field("channels", gst::IntRange::::new(1, u16::MAX as i32)) + .field("rate", gst::IntRange::::new(1, i32::MAX)) + .build(), + gst::Structure::builder("audio/x-opus") + .field("channel-mapping-family", gst::IntRange::new(0i32, 255)) + .field("channels", gst::IntRange::new(1i32, 8)) + .field("rate", gst::IntRange::new(1, i32::MAX)) + .build(), + ] + .into_iter() + .collect::(), + super::FMP4MuxPad::static_type(), + ) + .unwrap(); + + vec![src_pad_template, sink_pad_template] + }); + + PAD_TEMPLATES.as_ref() + } +} + +impl AggregatorImpl for DASHMP4Mux {} + +impl FMP4MuxImpl for DASHMP4Mux { + const VARIANT: super::Variant = super::Variant::DASH; +} + +#[derive(Default)] +pub(crate) struct ONVIFFMP4Mux; + +#[glib::object_subclass] +impl ObjectSubclass for ONVIFFMP4Mux { + const NAME: &'static str = "GstONVIFFMP4Mux"; + type Type = super::ONVIFFMP4Mux; + type ParentType = 
super::FMP4Mux; +} + +impl ObjectImpl for ONVIFFMP4Mux {} + +impl GstObjectImpl for ONVIFFMP4Mux {} + +impl ElementImpl for ONVIFFMP4Mux { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "ONVIFFMP4Mux", + "Codec/Muxer", + "ONVIF fragmented MP4 muxer", + "Sebastian Dröge ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let src_pad_template = gst::PadTemplate::new( + "src", + gst::PadDirection::Src, + gst::PadPresence::Always, + &gst::Caps::builder("video/quicktime") + .field("variant", "iso-fragmented") + .build(), + ) + .unwrap(); + + let sink_pad_template = gst::PadTemplate::with_gtype( + "sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &[ + gst::Structure::builder("video/x-h264") + .field("stream-format", gst::List::new(["avc", "avc3"])) + .field("alignment", "au") + .field("width", gst::IntRange::::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("video/x-h265") + .field("stream-format", gst::List::new(["hvc1", "hev1"])) + .field("alignment", "au") + .field("width", gst::IntRange::::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("image/jpeg") + .field("width", gst::IntRange::::new(1, u16::MAX as i32)) + .field("height", gst::IntRange::::new(1, u16::MAX as i32)) + .build(), + gst::Structure::builder("audio/mpeg") + .field("mpegversion", 4i32) + .field("stream-format", "raw") + .field("channels", gst::IntRange::::new(1, u16::MAX as i32)) + .field("rate", gst::IntRange::::new(1, i32::MAX)) + .build(), + gst::Structure::builder("audio/x-alaw") + .field("channels", gst::IntRange::::new(1, 2)) + .field("rate", gst::IntRange::::new(1, i32::MAX)) + .build(), + 
gst::Structure::builder("audio/x-mulaw") + .field("channels", gst::IntRange::::new(1, 2)) + .field("rate", gst::IntRange::::new(1, i32::MAX)) + .build(), + gst::Structure::builder("audio/x-adpcm") + .field("layout", "g726") + .field("channels", 1i32) + .field("rate", 8000i32) + .field("bitrate", gst::List::new([16000i32, 24000, 32000, 40000])) + .build(), + gst::Structure::builder("application/x-onvif-metadata") + .field("parsed", true) + .build(), + ] + .into_iter() + .collect::(), + super::FMP4MuxPad::static_type(), + ) + .unwrap(); + + vec![src_pad_template, sink_pad_template] + }); + + PAD_TEMPLATES.as_ref() + } +} + +impl AggregatorImpl for ONVIFFMP4Mux {} + +impl FMP4MuxImpl for ONVIFFMP4Mux { + const VARIANT: super::Variant = super::Variant::ONVIF; +} + +#[derive(Default, Clone)] +struct PadSettings { + trak_timescale: u32, +} + +#[derive(Default)] +pub(crate) struct FMP4MuxPad { + settings: Mutex, +} + +#[glib::object_subclass] +impl ObjectSubclass for FMP4MuxPad { + const NAME: &'static str = "GstFMP4MuxPad"; + type Type = super::FMP4MuxPad; + type ParentType = gst_base::AggregatorPad; +} + +impl ObjectImpl for FMP4MuxPad { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![glib::ParamSpecUInt::builder("trak-timescale") + .nick("Track Timescale") + .blurb("Timescale to use for the track (units per second, 0 is automatic)") + .mutable_ready() + .build()] + }); + + &PROPERTIES + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "trak-timescale" => { + let mut settings = self.settings.lock().unwrap(); + settings.trak_timescale = value.get().expect("type checked upstream"); + } + + _ => unimplemented!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "trak-timescale" => { + let settings = self.settings.lock().unwrap(); + settings.trak_timescale.to_value() + } + + _ => unimplemented!(), 
+ } + } +} + +impl GstObjectImpl for FMP4MuxPad {} + +impl PadImpl for FMP4MuxPad {} + +impl AggregatorPadImpl for FMP4MuxPad { + fn flush(&self, aggregator: &gst_base::Aggregator) -> Result { + let mux = aggregator.downcast_ref::().unwrap(); + let mut mux_state = mux.imp().state.lock().unwrap(); + + for stream in &mut mux_state.streams { + if stream.sinkpad == *self.obj() { + stream.queued_gops.clear(); + stream.dts_offset = None; + stream.current_position = gst::ClockTime::ZERO; + stream.fragment_filled = false; + stream.pre_queue.clear(); + stream.running_time_utc_time_mapping = None; + break; + } + } + + drop(mux_state); + + self.parent_flush(aggregator) + } +} \ No newline at end of file diff --git a/gstreamer/src/fmp4mux/mod.rs b/gstreamer/src/fmp4mux/mod.rs new file mode 100644 index 0000000..6796094 --- /dev/null +++ b/gstreamer/src/fmp4mux/mod.rs @@ -0,0 +1,208 @@ +// Copyright (C) 2021 Sebastian Dröge +// +// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0. +// If a copy of the MPL was not distributed with this file, You can obtain one at +// . +// +// SPDX-License-Identifier: MPL-2.0 + +use gst::glib; +use gst::prelude::*; + +mod boxes; +mod imp; + +glib::wrapper! { + pub(crate) struct FMP4MuxPad(ObjectSubclass) @extends gst_base::AggregatorPad, gst::Pad, gst::Object; +} + +glib::wrapper! { + pub(crate) struct FMP4Mux(ObjectSubclass) @extends gst_base::Aggregator, gst::Element, gst::Object; +} + +glib::wrapper! { + pub(crate) struct ISOFMP4Mux(ObjectSubclass) @extends FMP4Mux, gst_base::Aggregator, gst::Element, gst::Object; +} + +glib::wrapper! { + pub(crate) struct CMAFMux(ObjectSubclass) @extends FMP4Mux, gst_base::Aggregator, gst::Element, gst::Object; +} + +glib::wrapper! { + pub(crate) struct DASHMP4Mux(ObjectSubclass) @extends FMP4Mux, gst_base::Aggregator, gst::Element, gst::Object; +} + +glib::wrapper! 
{ + pub(crate) struct ONVIFFMP4Mux(ObjectSubclass) @extends FMP4Mux, gst_base::Aggregator, gst::Element, gst::Object; +} + +pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { + #[cfg(feature = "doc")] + { + FMP4Mux::static_type().mark_as_plugin_api(gst::PluginAPIFlags::empty()); + FMP4MuxPad::static_type().mark_as_plugin_api(gst::PluginAPIFlags::empty()); + HeaderUpdateMode::static_type().mark_as_plugin_api(gst::PluginAPIFlags::empty()); + } + gst::Element::register( + Some(plugin), + "isofmp4mux", + gst::Rank::PRIMARY, + ISOFMP4Mux::static_type(), + )?; + gst::Element::register( + Some(plugin), + "cmafmux", + gst::Rank::PRIMARY, + CMAFMux::static_type(), + )?; + gst::Element::register( + Some(plugin), + "dashmp4mux", + gst::Rank::PRIMARY, + DASHMP4Mux::static_type(), + )?; + gst::Element::register( + Some(plugin), + "onviffmp4mux", + gst::Rank::PRIMARY, + ONVIFFMP4Mux::static_type(), + )?; + + Ok(()) +} + +#[derive(Debug)] +pub(crate) struct HeaderConfiguration { + variant: Variant, + update: bool, + + /// Pre-defined movie timescale if not 0. + movie_timescale: u32, + + /// First caps must be the video/reference stream. Must be in the order the tracks are going to + /// be used later for the fragments too. + streams: Vec, + + write_mehd: bool, + duration: Option, + + /// Start UTC time in ONVIF mode. + /// Since Jan 1 1601 in 100ns units. + start_utc_time: Option, +} + +#[derive(Debug)] +pub(crate) struct HeaderStream { + /// Caps of this stream + caps: gst::Caps, + + /// Set if this is an intra-only stream + delta_frames: DeltaFrames, + + /// Pre-defined trak timescale if not 0. + trak_timescale: u32, +} + +#[derive(Debug)] +pub(crate) struct FragmentHeaderConfiguration<'a> { + variant: Variant, + + /// Sequence number for this fragment. + sequence_number: u32, + + /// If this is a full fragment or only a chunk. 
+ chunk: bool, + + streams: &'a [FragmentHeaderStream], + buffers: &'a [Buffer], +} + +#[derive(Debug)] +pub(crate) struct FragmentHeaderStream { + /// Caps of this stream + caps: gst::Caps, + + /// Set if this is an intra-only stream + delta_frames: DeltaFrames, + + /// Pre-defined trak timescale if not 0. + trak_timescale: u32, + + /// Start time of this fragment + /// + /// `None` if this stream has no buffers in this fragment. + start_time: Option, +} + +#[derive(Debug, Copy, Clone)] +pub(crate) enum DeltaFrames { + /// Only single completely decodable frames + IntraOnly, + /// Frames may depend on past frames + PredictiveOnly, + /// Frames may depend on past or future frames + Bidirectional, +} + +impl DeltaFrames { + /// Whether dts is required to order buffers differently from presentation order + pub(crate) fn requires_dts(&self) -> bool { + matches!(self, Self::Bidirectional) + } + /// Whether this coding structure does not allow delta flags on buffers + pub(crate) fn intra_only(&self) -> bool { + matches!(self, Self::IntraOnly) + } +} + +#[derive(Debug)] +pub(crate) struct Buffer { + /// Track index + idx: usize, + + /// Actual buffer + buffer: gst::Buffer, + + /// Timestamp + timestamp: gst::ClockTime, + + /// Sample duration + duration: gst::ClockTime, + + /// Composition time offset + composition_time_offset: Option, +} + +#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum Variant { + ISO, + CMAF, + DASH, + ONVIF, +} + +impl Variant { + pub(crate) fn is_single_stream(self) -> bool { + match self { + Variant::ISO | Variant::ONVIF => false, + Variant::CMAF | Variant::DASH => true, + } + } +} + +#[derive(Debug)] +pub(crate) struct FragmentOffset { + time: gst::ClockTime, + offset: u64, +} + +#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, glib::Enum)] +#[repr(i32)] +#[enum_type(name = "GstFMP4MuxHeaderUpdateMode")] +pub(crate) enum HeaderUpdateMode { + None, + Rewrite, + 
Update, +} \ No newline at end of file diff --git a/gstreamer/src/lib.rs b/gstreamer/src/lib.rs new file mode 100644 index 0000000..9e7541e --- /dev/null +++ b/gstreamer/src/lib.rs @@ -0,0 +1,39 @@ +// Copyright (C) 2021 Sebastian Dröge +// +// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0. +// If a copy of the MPL was not distributed with this file, You can obtain one at +// . +// +// SPDX-License-Identifier: MPL-2.0 +#![allow(clippy::non_send_fields_in_send_ty, unused_doc_comments)] + +/** + * plugin-fmp4: + * + * Since: plugins-rs-0.8.0 + */ +use gst::glib; + +mod fmp4mux; +mod warpsink; +mod waylandsrc; + +fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { + waylandsrc::register(plugin)?; + fmp4mux::register(plugin)?; + warpsink::register(plugin)?; + Ok(()) +} + +gst::plugin_define!( + warp, + env!("CARGO_PKG_DESCRIPTION"), + plugin_init, + concat!(env!("CARGO_PKG_VERSION"), "-", env!("COMMIT_ID")), + // FIXME: MPL-2.0 is only allowed since 1.18.3 (as unknown) and 1.20 (as known) + "MPL", + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_REPOSITORY"), + env!("BUILD_REL_DATE") +); \ No newline at end of file diff --git a/src/warpsink/imp.rs b/gstreamer/src/warpsink/imp.rs similarity index 58% rename from src/warpsink/imp.rs rename to gstreamer/src/warpsink/imp.rs index 99f8326..6a52b7a 100644 --- a/src/warpsink/imp.rs +++ b/gstreamer/src/warpsink/imp.rs @@ -34,7 +34,7 @@ impl ElementImpl for MoqSink { "MoQ Sink", "Sink/Network", "Send data over QUIC using the MoQ protocol", - "Wanjohi Ryan ", + "Wanjohi Ryan", ) }); Some(&*ELEMENT_METADATA) @@ -94,58 +94,14 @@ impl BaseSinkImpl for MoqSink { fn render(&self, element: &Self::Type, buffer: &gst::Buffer) -> Result { // Send buffer data over QUIC using moq_transport. - // Extract data from the buffer - let data = buffer.map_readable().map_err(|_| gst::FlowError::Error)?; + // from the provided `imp.rs` code. 
For simplicity, we'll just print the buffer size. + + let size = buffer.size(); + println!("Received buffer of size {}", size); - // Assuming that the upstream element is producing MP4 atoms, we need to distinguish - // between 'moof' and 'mdat' atoms. We will also assume that each buffer contains a - // complete atom for simplicity. In a real-world scenario, you might need to handle - // partial atoms and reassemble them here. - // - // The first 4 bytes of the buffer contain the size of the atom, and the next 4 bytes - // contain the atom type. For 'moof' and 'mdat', we would send them as separate messages - // or bundle them together, depending on the protocol requirements. + // You would then send the buffer data over QUIC using moq_transport. + // This part is omitted and should be implemented based on your transport protocol. - // Check if the buffer is large enough to contain the size and type of the atom. - if data.len() < 8 { - gst::element_error!( - element, - gst::CoreError::Failed, - ("Buffer is too small to contain an MP4 atom") - ); - return Err(gst::FlowError::Error); - } - - // Read atom size and type - let size = u32::from_be_bytes(data[0..4].try_into().unwrap()) as usize; - let atom_type = &data[4..8]; - - // Ensure the buffer contains the complete atom - if size > data.len() { - gst::element_error!( - element, - gst::CoreError::Failed, - ("Buffer does not contain the complete MP4 atom") - ); - return Err(gst::FlowError::Error); - } - - // Handle 'moof' and 'mdat' atoms - match atom_type { - b"moof" => { - // Handle 'moof' atom - // Send the 'moof' atom over the moq_transport protocol - // self.moq_transport.send_moof(data.as_slice()); - }, - b"mdat" => { - // Handle 'mdat' atom - // Send the 'mdat' atom over the moq_transport protocol - // self.moq_transport.send_mdat(data.as_slice()); - }, - _ => { - // Handle other atoms or ignore them - } - } Ok(gst::FlowSuccess::Ok) } } \ No newline at end of file diff --git a/src/warpsink/mod.rs 
b/gstreamer/src/warpsink/mod.rs similarity index 100% rename from src/warpsink/mod.rs rename to gstreamer/src/warpsink/mod.rs diff --git a/gstreamer/src/wayland/imp.rs b/gstreamer/src/wayland/imp.rs new file mode 100644 index 0000000..d45958f --- /dev/null +++ b/gstreamer/src/wayland/imp.rs @@ -0,0 +1,282 @@ +use std::sync::Mutex; + +use gst::message::Application; +use gst_video::{VideoCapsBuilder, VideoFormat}; + +use gst::subclass::prelude::*; +use gst::{glib, Event, Fraction}; +use gst::{ + glib::{once_cell::sync::Lazy, ValueArray}, + LibraryError, +}; +use gst::{prelude::*, Structure}; + +use gst_base::subclass::base_src::CreateSuccess; +use gst_base::subclass::prelude::*; +use gst_base::traits::BaseSrcExt; + +use waylanddisplaycore::WaylandDisplay; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::Registry; + +use crate::utils::{GstLayer, CAT}; + +pub struct WaylandDisplaySrc { + state: Mutex>, + settings: Mutex, +} + +impl Default for WaylandDisplaySrc { + fn default() -> Self { + WaylandDisplaySrc { + state: Mutex::new(None), + settings: Mutex::new(Settings::default()), + } + } +} + +#[derive(Debug, Default)] +pub struct Settings { + render_node: Option, +} + +pub struct State { + display: WaylandDisplay, +} + +#[glib::object_subclass] +impl ObjectSubclass for WaylandDisplaySrc { + const NAME: &'static str = "GstWaylandDisplaySrc"; + type Type = super::WaylandDisplaySrc; + type ParentType = gst_base::PushSrc; + type Interfaces = (); +} + +impl ObjectImpl for WaylandDisplaySrc { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![glib::ParamSpecString::builder("render-node") + .nick("DRM Render Node") + .blurb("DRM Render Node to use (e.g. 
/dev/dri/renderD128") + .construct() + .build()] + }); + + PROPERTIES.as_ref() + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "render-node" => { + let mut settings = self.settings.lock().unwrap(); + settings.render_node = value + .get::>() + .expect("Type checked upstream"); + } + _ => unreachable!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "render-node" => { + let settings = self.settings.lock().unwrap(); + settings + .render_node + .clone() + .unwrap_or_else(|| String::from("/dev/dri/renderD128")) + .to_value() + } + _ => unreachable!(), + } + } + + fn constructed(&self) { + self.parent_constructed(); + + let obj = self.obj(); + obj.set_element_flags(gst::ElementFlags::SOURCE); + obj.set_live(true); + obj.set_format(gst::Format::Time); + obj.set_automatic_eos(false); + obj.set_do_timestamp(true); + } +} + +impl GstObjectImpl for WaylandDisplaySrc {} + +impl ElementImpl for WaylandDisplaySrc { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "Wayland display source", + "Source/Video", + "GStreamer video src running a wayland compositor", + "Victoria Brekenfeld ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let caps = gst_video::VideoCapsBuilder::new() + .format(VideoFormat::Rgbx) + .height_range(..i32::MAX) + .width_range(..i32::MAX) + .framerate_range(Fraction::new(1, 1)..Fraction::new(i32::MAX, 1)) + .build(); + let src_pad_template = gst::PadTemplate::new( + "src", + gst::PadDirection::Src, + gst::PadPresence::Always, + &caps, + ) + .unwrap(); + + vec![src_pad_template] + }); + + PAD_TEMPLATES.as_ref() + } + + fn change_state( + &self, + transition: gst::StateChange, + ) -> Result { + let res = 
self.parent_change_state(transition); + match res { + Ok(gst::StateChangeSuccess::Success) => { + if transition.next() == gst::State::Paused { + // this is a live source + Ok(gst::StateChangeSuccess::NoPreroll) + } else { + Ok(gst::StateChangeSuccess::Success) + } + } + x => x, + } + } + + fn query(&self, query: &mut gst::QueryRef) -> bool { + ElementImplExt::parent_query(self, query) + } +} + +impl BaseSrcImpl for WaylandDisplaySrc { + fn query(&self, query: &mut gst::QueryRef) -> bool { + BaseSrcImplExt::parent_query(self, query) + } + + fn caps(&self, filter: Option<&gst::Caps>) -> Option { + let mut caps = VideoCapsBuilder::new() + .format(VideoFormat::Rgbx) + .height_range(..i32::MAX) + .width_range(..i32::MAX) + .framerate_range(Fraction::new(1, 1)..Fraction::new(i32::MAX, 1)) + .build(); + + if let Some(filter) = filter { + caps = caps.intersect(filter); + } + + Some(caps) + } + + fn negotiate(&self) -> Result<(), gst::LoggableError> { + self.parent_negotiate() + } + + fn event(&self, event: &Event) -> bool { + if event.type_() == gst::EventType::CustomUpstream { + let structure = event.structure().expect("Unable to get message structure"); + if structure.has_name("VirtualDevicesReady") { + let mut state = self.state.lock().unwrap(); + let display = &mut state.as_mut().unwrap().display; + + let paths = structure + .get::("paths") + .expect("Should contain paths"); + for value in paths.into_iter() { + let path = value.get::().expect("Paths are strings"); + display.add_input_device(path); + } + + return true; + } + } + self.parent_event(event) + } + + fn set_caps(&self, caps: &gst::Caps) -> Result<(), gst::LoggableError> { + let video_info = gst_video::VideoInfo::from_caps(caps).expect("failed to get video info"); + self.state + .lock() + .unwrap() + .as_mut() + .unwrap() + .display + .set_video_info(video_info); + + self.parent_set_caps(caps) + } + + fn start(&self) -> Result<(), gst::ErrorMessage> { + let mut state = self.state.lock().unwrap(); + if 
state.is_some() { + return Ok(()); + } + + let settings = self.settings.lock().unwrap(); + let elem = self.obj().upcast_ref::().to_owned(); + let subscriber = Registry::default().with(GstLayer); + + let Ok(mut display) = tracing::subscriber::with_default(subscriber, || WaylandDisplay::new(settings.render_node.clone())) else { + return Err(gst::error_msg!(LibraryError::Failed, ("Failed to open drm node {}, if you want to utilize software rendering set `render-node=software`.", settings.render_node.as_deref().unwrap_or("/dev/dri/renderD128")))); + }; + + let mut structure = Structure::builder("wayland.src"); + for (key, var) in display.env_vars().flat_map(|var| var.split_once("=")) { + structure = structure.field(key, var); + } + let structure = structure.build(); + if let Err(err) = elem.post_message(Application::builder(structure).src(&elem).build()) { + gst::warning!(CAT, "Failed to post environment to gstreamer bus: {}", err); + } + + *state = Some(State { display }); + + Ok(()) + } + + fn stop(&self) -> Result<(), gst::ErrorMessage> { + let mut state = self.state.lock().unwrap(); + if let Some(state) = state.take() { + let subscriber = Registry::default().with(GstLayer); + tracing::subscriber::with_default(subscriber, || std::mem::drop(state.display)); + } + Ok(()) + } + + fn is_seekable(&self) -> bool { + false + } +} + +impl PushSrcImpl for WaylandDisplaySrc { + fn create( + &self, + _buffer: Option<&mut gst::BufferRef>, + ) -> Result { + let mut state_guard = self.state.lock().unwrap(); + let Some(state) = state_guard.as_mut() else { + return Err(gst::FlowError::Eos); + }; + + let subscriber = Registry::default().with(GstLayer); + tracing::subscriber::with_default(subscriber, || { + state.display.frame().map(CreateSuccess::NewBuffer) + }) + } +} \ No newline at end of file diff --git a/gstreamer/src/wayland/mod.rs b/gstreamer/src/wayland/mod.rs new file mode 100644 index 0000000..e2a0651 --- /dev/null +++ b/gstreamer/src/wayland/mod.rs @@ -0,0 +1,17 @@ +use 
gst::glib; +use gst::prelude::*; + +mod imp; + +glib::wrapper! { + pub struct WaylandDisplaySrc(ObjectSubclass) @extends gst_base::PushSrc, gst_base::BaseSrc, gst::Element, gst::Object; +} + +pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { + gst::Element::register( + Some(plugin), + "waylanddisplaysrc", + gst::Rank::Marginal, + WaylandDisplaySrc::static_type(), + ) +} \ No newline at end of file diff --git a/moq-transport/Cargo.lock b/moq-transport/Cargo.lock new file mode 100644 index 0000000..02d73b0 --- /dev/null +++ b/moq-transport/Cargo.lock @@ -0,0 +1,1315 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-executor" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c1da3ae8dabd9c00f453a329dfe1fb28da3c0a72e2478cdcd93171740c20499" +dependencies = [ + "async-lock", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite", + "log", + "parking", + "polling", + 
"rustix", + "slab", + "socket2 0.4.9", + "waker-fn", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9441c6b2fe128a7c2bf680a44c34d0df31ce09e5b7e401fcca3faa483dbc921" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "blocking" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8c36a4d0d48574b3dd360b4b7d95cc651d2b6557b6402848a27d4b228a473e2a" +dependencies = [ + "async-channel", + "async-lock", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite", + "piper", + "tracing", +] + +[[package]] +name = "bumpalo" +version = "3.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" + +[[package]] +name = "bytes" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" + +[[package]] +name = "cc" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "concurrent-queue" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ctor" +version = 
"0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +dependencies = [ + "percent-encoding", +] + 
+[[package]] +name = "futures" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" + +[[package]] +name = "futures-executor" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-macro" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "futures-sink" +version = "0.3.28" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" + +[[package]] +name = "futures-task" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" + +[[package]] +name = "futures-util" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "hashbrown" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" + +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + +[[package]] +name = "http" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "idna" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "itoa" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" + +[[package]] +name = "js-sys" +version = "0.3.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "libc" +version = "0.2.144" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", + "value-bag", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "mio" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys 0.45.0", +] + +[[package]] +name = "moq-transport" +version = "0.2.0" +dependencies = [ + "bytes", + "indexmap", + "log", + "quinn", + "thiserror", + "tokio", + "webtransport-quinn", +] + +[[package]] +name = "once_cell" +version = "1.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "parking" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" + +[[package]] +name = "percent-encoding" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" 
+version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quinn" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21252f1c0fc131f1b69182db8f34837e8a69737b8251dff75636a9be0518c324" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85af4ed6ee5a89f26a26086e9089a6643650544c025158449a3626ebf72884b3" +dependencies = [ + "bytes", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-native-certs", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.3", + "tracing", + "windows-sys 0.48.0", +] + +[[package]] +name = "quote" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustix" +version = "0.37.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", 
+ "windows-sys 0.48.0", +] + +[[package]] +name = "rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +dependencies = [ + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] + +[[package]] +name = "socket2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thiserror" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +dependencies = [ + "autocfg", + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2 0.4.9", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "tracing-core" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +dependencies = [ + 
"once_cell", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "value-bag" +version = "1.0.0-alpha.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" +dependencies = [ + "ctor", + "version_check", +] + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.16", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" + +[[package]] +name = "web-sys" +version = "0.3.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +dependencies = [ + "js-sys", + 
"wasm-bindgen", +] + +[[package]] +name = "webtransport-generic" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df712317d761312996f654739debeb3838eb02c6fd9146d9efdfd08a46674e45" +dependencies = [ + "bytes", + "tokio", +] + +[[package]] +name = "webtransport-proto" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebeada5037d6302980ae2e0ab8d840e329c1697c612c6c077172de2b7631a276" +dependencies = [ + "bytes", + "http", + "thiserror", + "url", +] + +[[package]] +name = "webtransport-quinn" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cceb876dbd00a87b3fd8869d1c315e07c28b0eb54d59b592a07a634f5e2b64e1" +dependencies = [ + "async-std", + "bytes", + "futures", + "http", + "quinn", + "quinn-proto", + "thiserror", + "tokio", + "url", + "webtransport-generic", + "webtransport-proto", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_i686_gnu" +version = 
"0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" diff --git a/moq-transport/Cargo.toml b/moq-transport/Cargo.toml new file mode 100644 index 0000000..6f5f050 --- /dev/null +++ b/moq-transport/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "moq-transport" +description = "Media over QUIC" +authors = ["Luke Curley"] +repository = "https://github.com/kixelated/moq-rs" +license = "MIT OR Apache-2.0" + +version = "0.2.0" +edition = "2021" + +keywords = ["quic", "http3", "webtransport", "media", "live"] +categories = ["multimedia", "network-programming", "web-programming"] + + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bytes = "1" +thiserror = "1" +tokio = { version = "1", features = ["macros", "io-util", "sync"] } +log = "0.4" +indexmap = "2" + +quinn = "0.10" +webtransport-quinn = "0.6.1" +#webtransport-quinn = { path = "../../webtransport-rs/webtransport-quinn" } + +async-trait = "0.1" +paste = "1" + +[dev-dependencies] +# QUIC +url = "2" + +# Crypto +rustls = { version = "0.21", features = ["dangerous_configuration"] } +rustls-native-certs = "0.6" +rustls-pemfile = "1" + +# Async stuff +tokio = { version = "1", features = ["full"] } + +# CLI, logging, error handling +clap = { version = "4", features = ["derive"] } +log = { version = "0.4", features = ["std"] } +env_logger = "0.9" +mp4 = "0.13" +anyhow = { version = "1", features = ["backtrace"] } +serde_json = "1" +rfc6381-codec = "0.1" +tracing = "0.1" +tracing-subscriber = "0.3" diff --git a/moq-transport/LICENSE-APACHE b/moq-transport/LICENSE-APACHE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/moq-transport/LICENSE-APACHE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/moq-transport/LICENSE-MIT b/moq-transport/LICENSE-MIT new file mode 100644 index 0000000..fbd437c --- /dev/null +++ b/moq-transport/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Luke Curley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/moq-transport/README.md b/moq-transport/README.md new file mode 100644 index 0000000..7788103 --- /dev/null +++ b/moq-transport/README.md @@ -0,0 +1,10 @@ +[![Documentation](https://docs.rs/moq-transport/badge.svg)](https://docs.rs/moq-transport/) +[![Crates.io](https://img.shields.io/crates/v/moq-transport.svg)](https://crates.io/crates/moq-transport) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT) + +# moq-transport + +A Rust implementation of the proposed IETF standard. + +[Specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) +[Github](https://github.com/moq-wg/moq-transport) diff --git a/moq-transport/src/cache/broadcast.rs b/moq-transport/src/cache/broadcast.rs new file mode 100644 index 0000000..feb3824 --- /dev/null +++ b/moq-transport/src/cache/broadcast.rs @@ -0,0 +1,262 @@ +//! A broadcast is a collection of tracks, split into two handles: [Publisher] and [Subscriber]. +//! +//! The [Publisher] can create tracks, either manually or on request. +//! It receives all requests by a [Subscriber] for a tracks that don't exist. +//! The simplest implementation is to close every unknown track with [CacheError::NotFound]. +//! +//! A [Subscriber] can request tracks by name. +//! If the track already exists, it will be returned. +//! If the track doesn't exist, it will be sent to [Unknown] to be handled. +//! A [Subscriber] can be cloned to create multiple subscriptions. +//! +//! 
The broadcast is automatically closed with [CacheError::Closed] when [Publisher] is dropped, or all [Subscriber]s are dropped. +use std::{ + collections::{hash_map, HashMap, VecDeque}, + fmt, + ops::Deref, + sync::Arc, +}; + +use super::{track, CacheError, Watch}; + +/// Create a new broadcast. +pub fn new(id: &str) -> (Publisher, Subscriber) { + let state = Watch::new(State::default()); + let info = Arc::new(Info { id: id.to_string() }); + + let publisher = Publisher::new(state.clone(), info.clone()); + let subscriber = Subscriber::new(state, info); + + (publisher, subscriber) +} + +/// Static information about a broadcast. +#[derive(Debug)] +pub struct Info { + pub id: String, +} + +/// Dynamic information about the broadcast. +#[derive(Debug)] +struct State { + tracks: HashMap, + requested: VecDeque, + closed: Result<(), CacheError>, +} + +impl State { + pub fn get(&self, name: &str) -> Result, CacheError> { + // Don't check closed, so we can return from cache. + Ok(self.tracks.get(name).cloned()) + } + + pub fn insert(&mut self, track: track::Subscriber) -> Result<(), CacheError> { + self.closed.clone()?; + + match self.tracks.entry(track.name.clone()) { + hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate), + hash_map::Entry::Vacant(v) => v.insert(track), + }; + + Ok(()) + } + + pub fn request(&mut self, name: &str) -> Result { + self.closed.clone()?; + + // Create a new track. + let (publisher, subscriber) = track::new(name); + + // Insert the track into our Map so we deduplicate future requests. + self.tracks.insert(name.to_string(), subscriber.clone()); + + // Send the track to the Publisher to handle. + self.requested.push_back(publisher); + + Ok(subscriber) + } + + pub fn has_next(&self) -> Result { + // Check if there's any elements in the queue before checking closed. 
+ if !self.requested.is_empty() { + return Ok(true); + } + + self.closed.clone()?; + Ok(false) + } + + pub fn next(&mut self) -> track::Publisher { + // We panic instead of erroring to avoid a nasty wakeup loop if you don't call has_next first. + self.requested.pop_front().expect("no entry in queue") + } + + pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { + self.closed.clone()?; + self.closed = Err(err); + Ok(()) + } +} + +impl Default for State { + fn default() -> Self { + Self { + tracks: HashMap::new(), + closed: Ok(()), + requested: VecDeque::new(), + } + } +} + +/// Publish new tracks for a broadcast by name. +// TODO remove Clone +#[derive(Clone)] +pub struct Publisher { + state: Watch, + info: Arc, + _dropped: Arc, +} + +impl Publisher { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + Self { state, info, _dropped } + } + + /// Create a new track with the given name, inserting it into the broadcast. + pub fn create_track(&mut self, name: &str) -> Result { + let (publisher, subscriber) = track::new(name); + self.state.lock_mut().insert(subscriber)?; + Ok(publisher) + } + + /// Insert a track into the broadcast. + pub fn insert_track(&mut self, track: track::Subscriber) -> Result<(), CacheError> { + self.state.lock_mut().insert(track) + } + + /// Block until the next track requested by a subscriber. + pub async fn next_track(&mut self) -> Result { + loop { + let notify = { + let state = self.state.lock(); + if state.has_next()? { + return Ok(state.into_mut().next()); + } + + state.changed() + }; + + notify.await; + } + } + + /// Close the broadcast with an error. 
+	pub fn close(self, err: CacheError) -> Result<(), CacheError> {
+		self.state.lock_mut().close(err)
+	}
+}
+
+impl Deref for Publisher {
+	type Target = Info;
+
+	fn deref(&self) -> &Self::Target {
+		&self.info
+	}
+}
+
+impl fmt::Debug for Publisher {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		f.debug_struct("Publisher")
+			.field("state", &self.state)
+			.field("info", &self.info)
+			.finish()
+	}
+}
+
+/// Subscribe to a broadcast by requesting tracks.
+///
+/// This can be cloned to create handles.
+#[derive(Clone)]
+pub struct Subscriber {
+	state: Watch<State>,
+	info: Arc<Info>,
+	_dropped: Arc<Dropped>,
+}
+
+impl Subscriber {
+	fn new(state: Watch<State>, info: Arc<Info>) -> Self {
+		let _dropped = Arc::new(Dropped::new(state.clone()));
+		Self { state, info, _dropped }
+	}
+
+	/// Get a track from the broadcast by name.
+	/// If the track does not exist, it will be created and potentially fulfilled by the publisher (via Unknown).
+	/// Otherwise, it will return [CacheError::NotFound].
+	pub fn get_track(&self, name: &str) -> Result<track::Subscriber, CacheError> {
+		let state = self.state.lock();
+		if let Some(track) = state.get(name)? {
+			return Ok(track);
+		}
+
+		// Request a new track if it does not exist.
+		state.into_mut().request(name)
+	}
+
+	/// Check if the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
+	pub fn is_closed(&self) -> Option<CacheError> {
+		self.state.lock().closed.as_ref().err().cloned()
+	}
+
+	/// Wait until the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
+ pub async fn closed(&self) -> CacheError { + loop { + let notify = { + let state = self.state.lock(); + if let Some(err) = state.closed.as_ref().err() { + return err.clone(); + } + + state.changed() + }; + + notify.await; + } + } +} + +impl Deref for Subscriber { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Subscriber { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Subscriber") + .field("state", &self.state) + .field("info", &self.info) + .finish() + } +} + +// A handle that closes the broadcast when dropped: +// - when all Subscribers are dropped or +// - when Publisher and Unknown are dropped. +struct Dropped { + state: Watch, +} + +impl Dropped { + fn new(state: Watch) -> Self { + Self { state } + } +} + +impl Drop for Dropped { + fn drop(&mut self) { + self.state.lock_mut().close(CacheError::Closed).ok(); + } +} diff --git a/moq-transport/src/cache/error.rs b/moq-transport/src/cache/error.rs new file mode 100644 index 0000000..d3f907b --- /dev/null +++ b/moq-transport/src/cache/error.rs @@ -0,0 +1,51 @@ +use thiserror::Error; + +use crate::MoqError; + +#[derive(Clone, Debug, Error)] +pub enum CacheError { + /// A clean termination, represented as error code 0. + /// This error is automatically used when publishers or subscribers are dropped without calling close. + #[error("closed")] + Closed, + + /// An ANNOUNCE_RESET or SUBSCRIBE_RESET was sent by the publisher. + #[error("reset code={0:?}")] + Reset(u32), + + /// An ANNOUNCE_STOP or SUBSCRIBE_STOP was sent by the subscriber. + #[error("stop")] + Stop, + + /// The requested resource was not found. + #[error("not found")] + NotFound, + + /// A resource already exists with that ID. + #[error("duplicate")] + Duplicate, +} + +impl MoqError for CacheError { + /// An integer code that is sent over the wire. 
+	fn code(&self) -> u32 {
+		match self {
+			Self::Closed => 0,
+			Self::Reset(code) => *code,
+			Self::Stop => 206,
+			Self::NotFound => 404,
+			Self::Duplicate => 409,
+		}
+	}
+
+	/// A reason that is sent over the wire.
+	fn reason(&self) -> String {
+		match self {
+			Self::Closed => "closed".to_owned(),
+			Self::Reset(code) => format!("reset code: {}", code),
+			Self::Stop => "stop".to_owned(),
+			Self::NotFound => "not found".to_owned(),
+			Self::Duplicate => "duplicate".to_owned(),
+		}
+	}
+}
diff --git a/moq-transport/src/cache/fragment.rs b/moq-transport/src/cache/fragment.rs
new file mode 100644
index 0000000..4e08333
--- /dev/null
+++ b/moq-transport/src/cache/fragment.rs
@@ -0,0 +1,208 @@
+//! A fragment is a stream of bytes with a header, split into a [Publisher] and [Subscriber] handle.
+//!
+//! A [Publisher] writes an ordered stream of bytes in chunks.
+//! There's no framing, so these chunks can be of any size or position, and won't be maintained over the network.
+//!
+//! A [Subscriber] reads an ordered stream of bytes in chunks.
+//! These chunks are returned directly from the QUIC connection, so they may be of any size or position.
+//! You can clone the [Subscriber] and each will read a copy of all future chunks. (fanout)
+//!
+//! The fragment is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
+use core::fmt;
+use std::{ops::Deref, sync::Arc};
+
+use crate::VarInt;
+use bytes::Bytes;
+
+use super::{CacheError, Watch};
+
+/// Create a new segment with the given info.
+pub fn new(info: Info) -> (Publisher, Subscriber) {
+	let state = Watch::new(State::default());
+	let info = Arc::new(info);
+
+	let publisher = Publisher::new(state.clone(), info.clone());
+	let subscriber = Subscriber::new(state, info);
+
+	(publisher, subscriber)
+}
+
+/// Static information about the segment.
+#[derive(Debug)]
+pub struct Info {
+	// The sequence number of the fragment within the segment.
+ // NOTE: These may be received out of order or with gaps. + pub sequence: VarInt, + + // The size of the fragment, optionally None if this is the last fragment in a segment. + // TODO enforce this size. + pub size: Option, +} + +struct State { + // The data that has been received thus far. + chunks: Vec, + + // Set when the publisher is dropped. + closed: Result<(), CacheError>, +} + +impl State { + pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { + self.closed.clone()?; + self.closed = Err(err); + Ok(()) + } +} + +impl Default for State { + fn default() -> Self { + Self { + chunks: Vec::new(), + closed: Ok(()), + } + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // We don't want to print out the contents, so summarize. + f.debug_struct("State").field("closed", &self.closed).finish() + } +} + +/// Used to write data to a segment and notify subscribers. +pub struct Publisher { + // Mutable segment state. + state: Watch, + + // Immutable segment state. + info: Arc, + + // Closes the segment when all Publishers are dropped. + _dropped: Arc, +} + +impl Publisher { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + Self { state, info, _dropped } + } + + /// Write a new chunk of bytes. + pub fn chunk(&mut self, chunk: Bytes) -> Result<(), CacheError> { + let mut state = self.state.lock_mut(); + state.closed.clone()?; + state.chunks.push(chunk); + Ok(()) + } + + /// Close the segment with an error. 
+ pub fn close(self, err: CacheError) -> Result<(), CacheError> { + self.state.lock_mut().close(err) + } +} + +impl Deref for Publisher { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Publisher { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Publisher") + .field("state", &self.state) + .field("info", &self.info) + .finish() + } +} + +/// Notified when a segment has new data available. +#[derive(Clone)] +pub struct Subscriber { + // Modify the segment state. + state: Watch, + + // Immutable segment state. + info: Arc, + + // The number of chunks that we've read. + // NOTE: Cloned subscribers inherit this index, but then run in parallel. + index: usize, + + // Dropped when all Subscribers are dropped. + _dropped: Arc, +} + +impl Subscriber { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + + Self { + state, + info, + index: 0, + _dropped, + } + } + + /// Block until the next chunk of bytes is available. + pub async fn chunk(&mut self) -> Result, CacheError> { + loop { + let notify = { + let state = self.state.lock(); + if self.index < state.chunks.len() { + let chunk = state.chunks[self.index].clone(); + self.index += 1; + return Ok(Some(chunk)); + } + + match &state.closed { + Err(CacheError::Closed) => return Ok(None), + Err(err) => return Err(err.clone()), + Ok(()) => state.changed(), + } + }; + + notify.await; // Try again when the state changes + } + } +} + +impl Deref for Subscriber { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Subscriber { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Subscriber") + .field("state", &self.state) + .field("info", &self.info) + .field("index", &self.index) + .finish() + } +} + +struct Dropped { + // Modify the segment state. 
+	state: Watch<State>,
+}
+
+impl Dropped {
+	fn new(state: Watch<State>) -> Self {
+		Self { state }
+	}
+}
+
+impl Drop for Dropped {
+	fn drop(&mut self) {
+		self.state.lock_mut().close(CacheError::Closed).ok();
+	}
+}
diff --git a/moq-transport/src/cache/mod.rs b/moq-transport/src/cache/mod.rs
new file mode 100644
index 0000000..96228cf
--- /dev/null
+++ b/moq-transport/src/cache/mod.rs
@@ -0,0 +1,21 @@
+//! Allows a publisher to push updates, automatically caching and fanning it out to any subscribers.
+//!
+//! The hierarchy is: [broadcast] -> [track] -> [segment] -> [fragment] -> [Bytes](bytes::Bytes)
+//!
+//! The naming scheme doesn't match the spec because it's more strict, and bikeshedding of course:
+//!
+//! - [broadcast] is kinda like "track namespace"
+//! - [track] is "track"
+//! - [segment] is "group" but MUST use a single stream.
+//! - [fragment] is "object" but MUST have the same properties as the segment.
+
+pub mod broadcast;
+mod error;
+pub mod fragment;
+pub mod segment;
+pub mod track;
+
+pub(crate) mod watch;
+pub(crate) use watch::*;
+
+pub use error::*;
diff --git a/moq-transport/src/cache/segment.rs b/moq-transport/src/cache/segment.rs
new file mode 100644
index 0000000..ecd27de
--- /dev/null
+++ b/moq-transport/src/cache/segment.rs
@@ -0,0 +1,226 @@
+//! A segment is a stream of fragments with a header, split into a [Publisher] and [Subscriber] handle.
+//!
+//! A [Publisher] writes an ordered stream of fragments.
+//! Each fragment can have a sequence number, allowing the subscriber to detect gaps between fragments.
+//!
+//! A [Subscriber] reads an ordered stream of fragments.
+//! The subscriber can be cloned, in which case each subscriber receives a copy of each fragment. (fanout)
+//!
+//! The segment is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
+use core::fmt; +use std::{ops::Deref, sync::Arc, time}; + +use crate::VarInt; + +use super::{fragment, CacheError, Watch}; + +/// Create a new segment with the given info. +pub fn new(info: Info) -> (Publisher, Subscriber) { + let state = Watch::new(State::default()); + let info = Arc::new(info); + + let publisher = Publisher::new(state.clone(), info.clone()); + let subscriber = Subscriber::new(state, info); + + (publisher, subscriber) +} + +/// Static information about the segment. +#[derive(Debug)] +pub struct Info { + // The sequence number of the segment within the track. + // NOTE: These may be received out of order or with gaps. + pub sequence: VarInt, + + // The priority of the segment within the BROADCAST. + pub priority: u32, + + // Cache the segment for at most this long. + pub expires: Option, +} + +struct State { + // The data that has been received thus far. + fragments: Vec, + + // Set when the publisher is dropped. + closed: Result<(), CacheError>, +} + +impl State { + pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { + self.closed.clone()?; + self.closed = Err(err); + Ok(()) + } +} + +impl Default for State { + fn default() -> Self { + Self { + fragments: Vec::new(), + closed: Ok(()), + } + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("State") + .field("fragments", &self.fragments) + .field("closed", &self.closed) + .finish() + } +} + +/// Used to write data to a segment and notify subscribers. +pub struct Publisher { + // Mutable segment state. + state: Watch, + + // Immutable segment state. + info: Arc, + + // Closes the segment when all Publishers are dropped. + _dropped: Arc, +} + +impl Publisher { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + Self { state, info, _dropped } + } + + // Not public because it's a footgun. 
+ pub(crate) fn push_fragment( + &mut self, + sequence: VarInt, + size: Option, + ) -> Result { + let (publisher, subscriber) = fragment::new(fragment::Info { sequence, size }); + + let mut state = self.state.lock_mut(); + state.closed.clone()?; + state.fragments.push(subscriber); + Ok(publisher) + } + + /// Write a fragment + pub fn fragment(&mut self, sequence: VarInt, size: usize) -> Result { + self.push_fragment(sequence, Some(size)) + } + + /// Write the last fragment, which means size can be unknown. + pub fn final_fragment(mut self, sequence: VarInt) -> Result { + self.push_fragment(sequence, None) + } + + /// Close the segment with an error. + pub fn close(self, err: CacheError) -> Result<(), CacheError> { + self.state.lock_mut().close(err) + } +} + +impl Deref for Publisher { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Publisher { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Publisher") + .field("state", &self.state) + .field("info", &self.info) + .finish() + } +} + +/// Notified when a segment has new data available. +#[derive(Clone)] +pub struct Subscriber { + // Modify the segment state. + state: Watch, + + // Immutable segment state. + info: Arc, + + // The number of chunks that we've read. + // NOTE: Cloned subscribers inherit this index, but then run in parallel. + index: usize, + + // Dropped when all Subscribers are dropped. + _dropped: Arc, +} + +impl Subscriber { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + + Self { + state, + info, + index: 0, + _dropped, + } + } + + /// Block until the next chunk of bytes is available. 
+ pub async fn fragment(&mut self) -> Result, CacheError> { + loop { + let notify = { + let state = self.state.lock(); + if self.index < state.fragments.len() { + let fragment = state.fragments[self.index].clone(); + self.index += 1; + return Ok(Some(fragment)); + } + + match &state.closed { + Err(CacheError::Closed) => return Ok(None), + Err(err) => return Err(err.clone()), + Ok(()) => state.changed(), + } + }; + + notify.await; // Try again when the state changes + } + } +} + +impl Deref for Subscriber { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Subscriber { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Subscriber") + .field("state", &self.state) + .field("info", &self.info) + .field("index", &self.index) + .finish() + } +} + +struct Dropped { + // Modify the segment state. + state: Watch, +} + +impl Dropped { + fn new(state: Watch) -> Self { + Self { state } + } +} + +impl Drop for Dropped { + fn drop(&mut self) { + self.state.lock_mut().close(CacheError::Closed).ok(); + } +} diff --git a/moq-transport/src/cache/track.rs b/moq-transport/src/cache/track.rs new file mode 100644 index 0000000..6d2d405 --- /dev/null +++ b/moq-transport/src/cache/track.rs @@ -0,0 +1,337 @@ +//! A track is a collection of semi-reliable and semi-ordered segments, split into a [Publisher] and [Subscriber] handle. +//! +//! A [Publisher] creates segments with a sequence number and priority. +//! The sequest number is used to determine the order of segments, while the priority is used to determine which segment to transmit first. +//! This may seem counter-intuitive, but is designed for live streaming where the newest segments may be higher priority. +//! A cloned [Publisher] can be used to create segments in parallel, but will error if a duplicate sequence number is used. +//! +//! A [Subscriber] may not receive all segments in order or at all. +//! 
These segments are meant to be transmitted over congested networks and the key to MoQ Transport is to not block on them.
+//! Segments will be cached for a potentially limited duration added to the unreliable nature.
+//! A cloned [Subscriber] will receive a copy of all new segments going forward (fanout).
+//!
+//! The track is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
+
+use std::{collections::BinaryHeap, fmt, ops::Deref, sync::Arc, time};
+
+use indexmap::IndexMap;
+
+use super::{segment, CacheError, Watch};
+use crate::VarInt;
+
+/// Create a track with the given name.
+pub fn new(name: &str) -> (Publisher, Subscriber) {
+	let state = Watch::new(State::default());
+	let info = Arc::new(Info { name: name.to_string() });
+
+	let publisher = Publisher::new(state.clone(), info.clone());
+	let subscriber = Subscriber::new(state, info);
+
+	(publisher, subscriber)
+}
+
+/// Static information about a track.
+#[derive(Debug)]
+pub struct Info {
+	pub name: String,
+}
+
+struct State {
+	// Store segments in received order so subscribers can detect changes.
+	// The key is the segment sequence, which could have gaps.
+	// A None value means the segment has expired.
+	lookup: IndexMap<VarInt, Option<segment::Subscriber>>,
+
+	// Store when segments will expire in a priority queue.
+	expires: BinaryHeap<SegmentExpiration>,
+
+	// The number of None entries removed from the start of the lookup.
+	pruned: usize,
+
+	// Set when the publisher is closed/dropped, or all subscribers are dropped.
+ closed: Result<(), CacheError>, +} + +impl State { + pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { + self.closed.clone()?; + self.closed = Err(err); + Ok(()) + } + + pub fn insert(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> { + self.closed.clone()?; + + let entry = match self.lookup.entry(segment.sequence) { + indexmap::map::Entry::Occupied(_entry) => return Err(CacheError::Duplicate), + indexmap::map::Entry::Vacant(entry) => entry, + }; + + if let Some(expires) = segment.expires { + self.expires.push(SegmentExpiration { + sequence: segment.sequence, + expires: time::Instant::now() + expires, + }); + } + + entry.insert(Some(segment)); + + // Expire any existing segments on insert. + // This means if you don't insert then you won't expire... but it's probably fine since the cache won't grow. + // TODO Use a timer to expire segments at the correct time instead + self.expire(); + + Ok(()) + } + + // Try expiring any segments + pub fn expire(&mut self) { + let now = time::Instant::now(); + while let Some(segment) = self.expires.peek() { + if segment.expires > now { + break; + } + + // Update the entry to None while preserving the index. + match self.lookup.entry(segment.sequence) { + indexmap::map::Entry::Occupied(mut entry) => entry.insert(None), + indexmap::map::Entry::Vacant(_) => panic!("expired segment not found"), + }; + + self.expires.pop(); + } + + // Remove None entries from the start of the lookup. 
+ while let Some((_, None)) = self.lookup.get_index(0) {
+ self.lookup.shift_remove_index(0);
+ self.pruned += 1;
+ }
+ }
+}
+
+impl Default for State {
+ fn default() -> Self {
+ Self {
+ lookup: Default::default(),
+ expires: Default::default(),
+ pruned: 0,
+ closed: Ok(()),
+ }
+ }
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("State")
+ .field("lookup", &self.lookup)
+ .field("pruned", &self.pruned)
+ .field("closed", &self.closed)
+ .finish()
+ }
+}
+
+/// Creates new segments for a track.
+pub struct Publisher {
+ state: Watch,
+ info: Arc,
+ _dropped: Arc,
+}
+
+impl Publisher {
+ fn new(state: Watch, info: Arc) -> Self {
+ let _dropped = Arc::new(Dropped::new(state.clone()));
+ Self { state, info, _dropped }
+ }
+
+ /// Insert a new segment.
+ pub fn insert_segment(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> {
+ self.state.lock_mut().insert(segment)
+ }
+
+ /// Create and insert a segment with the given info.
+ pub fn create_segment(&mut self, info: segment::Info) -> Result {
+ let (publisher, subscriber) = segment::new(info);
+ self.insert_segment(subscriber)?;
+ Ok(publisher)
+ }
+
+ /// Close the track with an error.
+ pub fn close(self, err: CacheError) -> Result<(), CacheError> {
+ self.state.lock_mut().close(err)
+ }
+}
+
+impl Deref for Publisher {
+ type Target = Info;
+
+ fn deref(&self) -> &Self::Target {
+ &self.info
+ }
+}
+
+impl fmt::Debug for Publisher {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Publisher")
+ .field("state", &self.state)
+ .field("info", &self.info)
+ .finish()
+ }
+}
+
+/// Receives new segments for a track.
+#[derive(Clone)]
+pub struct Subscriber {
+ state: Watch,
+ info: Arc,
+
+ // The index of the next segment to return.
+ index: usize,
+
+ // If there are multiple segments to return, we put them in here to return them in priority order.
+ pending: BinaryHeap, + + // Dropped when all subscribers are dropped. + _dropped: Arc, +} + +impl Subscriber { + fn new(state: Watch, info: Arc) -> Self { + let _dropped = Arc::new(Dropped::new(state.clone())); + Self { + state, + info, + index: 0, + pending: Default::default(), + _dropped, + } + } + + /// Block until the next segment arrives + pub async fn segment(&mut self) -> Result, CacheError> { + loop { + let notify = { + let state = self.state.lock(); + + // Get our adjusted index, which could be negative if we've removed more broadcasts than read. + let mut index = self.index.saturating_sub(state.pruned); + + // Push all new segments into a priority queue. + while index < state.lookup.len() { + let (_, segment) = state.lookup.get_index(index).unwrap(); + + // Skip None values (expired segments). + // TODO These might actually be expired, so we should check the expiration time. + if let Some(segment) = segment { + self.pending.push(SegmentPriority(segment.clone())); + } + + index += 1; + } + + self.index = state.pruned + index; + + // Return the higher priority segment. + if let Some(segment) = self.pending.pop() { + return Ok(Some(segment.0)); + } + + // Otherwise check if we need to return an error. + match &state.closed { + Err(CacheError::Closed) => return Ok(None), + Err(err) => return Err(err.clone()), + Ok(()) => state.changed(), + } + }; + + notify.await + } + } +} + +impl Deref for Subscriber { + type Target = Info; + + fn deref(&self) -> &Self::Target { + &self.info + } +} + +impl fmt::Debug for Subscriber { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Subscriber") + .field("state", &self.state) + .field("info", &self.info) + .field("index", &self.index) + .finish() + } +} + +// Closes the track on Drop. 
+struct Dropped { + state: Watch, +} + +impl Dropped { + fn new(state: Watch) -> Self { + Self { state } + } +} + +impl Drop for Dropped { + fn drop(&mut self) { + self.state.lock_mut().close(CacheError::Closed).ok(); + } +} + +// Used to order segments by expiration time. +struct SegmentExpiration { + sequence: VarInt, + expires: time::Instant, +} + +impl Ord for SegmentExpiration { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // Reverse order so the earliest expiration is at the top of the heap. + other.expires.cmp(&self.expires) + } +} + +impl PartialOrd for SegmentExpiration { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for SegmentExpiration { + fn eq(&self, other: &Self) -> bool { + self.expires == other.expires + } +} + +impl Eq for SegmentExpiration {} + +// Used to order segments by priority +#[derive(Clone)] +struct SegmentPriority(pub segment::Subscriber); + +impl Ord for SegmentPriority { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // Reverse order so the highest priority is at the top of the heap. 
+ // TODO I let CodePilot generate this code so yolo + other.0.priority.cmp(&self.0.priority) + } +} + +impl PartialOrd for SegmentPriority { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for SegmentPriority { + fn eq(&self, other: &Self) -> bool { + self.0.priority == other.0.priority + } +} + +impl Eq for SegmentPriority {} diff --git a/moq-transport/src/cache/watch.rs b/moq-transport/src/cache/watch.rs new file mode 100644 index 0000000..93c8475 --- /dev/null +++ b/moq-transport/src/cache/watch.rs @@ -0,0 +1,180 @@ +use std::{ + fmt, + future::Future, + ops::{Deref, DerefMut}, + pin::Pin, + sync::{Arc, Mutex, MutexGuard}, + task, +}; + +struct State { + value: T, + wakers: Vec, + epoch: usize, +} + +impl State { + pub fn new(value: T) -> Self { + Self { + value, + wakers: Vec::new(), + epoch: 0, + } + } + + pub fn register(&mut self, waker: &task::Waker) { + self.wakers.retain(|existing| !existing.will_wake(waker)); + self.wakers.push(waker.clone()); + } + + pub fn notify(&mut self) { + self.epoch += 1; + for waker in self.wakers.drain(..) 
{ + waker.wake(); + } + } +} + +impl Default for State { + fn default() -> Self { + Self::new(T::default()) + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.value.fmt(f) + } +} + +pub struct Watch { + state: Arc>>, +} + +impl Watch { + pub fn new(initial: T) -> Self { + let state = Arc::new(Mutex::new(State::new(initial))); + Self { state } + } + + pub fn lock(&self) -> WatchRef { + WatchRef { + state: self.state.clone(), + lock: self.state.lock().unwrap(), + } + } + + pub fn lock_mut(&self) -> WatchMut { + WatchMut { + lock: self.state.lock().unwrap(), + } + } +} + +impl Clone for Watch { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + } + } +} + +impl Default for Watch { + fn default() -> Self { + Self::new(T::default()) + } +} + +impl fmt::Debug for Watch { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.state.try_lock() { + Ok(lock) => lock.value.fmt(f), + Err(_) => write!(f, ""), + } + } +} + +pub struct WatchRef<'a, T> { + state: Arc>>, + lock: MutexGuard<'a, State>, +} + +impl<'a, T> WatchRef<'a, T> { + // Release the lock and wait for a notification when next updated. + pub fn changed(self) -> WatchChanged { + WatchChanged { + state: self.state, + epoch: self.lock.epoch, + } + } + + // Upgrade to a mutable references that automatically calls notify on drop. 
+ pub fn into_mut(self) -> WatchMut<'a, T> { + WatchMut { lock: self.lock } + } +} + +impl<'a, T> Deref for WatchRef<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.lock.value + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for WatchRef<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.lock.fmt(f) + } +} + +pub struct WatchMut<'a, T> { + lock: MutexGuard<'a, State>, +} + +impl<'a, T> Deref for WatchMut<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.lock.value + } +} + +impl<'a, T> DerefMut for WatchMut<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.lock.value + } +} + +impl<'a, T> Drop for WatchMut<'a, T> { + fn drop(&mut self) { + self.lock.notify(); + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for WatchMut<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.lock.fmt(f) + } +} + +pub struct WatchChanged { + state: Arc>>, + epoch: usize, +} + +impl Future for WatchChanged { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll { + // TODO is there an API we can make that doesn't drop this lock? + let mut state = self.state.lock().unwrap(); + + if state.epoch > self.epoch { + task::Poll::Ready(()) + } else { + state.register(cx.waker()); + task::Poll::Pending + } + } +} diff --git a/moq-transport/src/coding/decode.rs b/moq-transport/src/coding/decode.rs new file mode 100644 index 0000000..a6fe94e --- /dev/null +++ b/moq-transport/src/coding/decode.rs @@ -0,0 +1,55 @@ +use super::{BoundsExceeded, VarInt}; +use std::{io, str}; + +use thiserror::Error; + +// I'm too lazy to add these trait bounds to every message type. +// TODO Use trait aliases when they're stable, or add these bounds to every method. 
+pub trait AsyncRead: tokio::io::AsyncRead + Unpin + Send {} +impl AsyncRead for webtransport_quinn::RecvStream {} +impl AsyncRead for tokio::io::Take<&mut T> where T: AsyncRead {} +impl + Unpin + Send> AsyncRead for io::Cursor {} + +#[async_trait::async_trait] +pub trait Decode: Sized { + async fn decode(r: &mut R) -> Result; +} + +/// A decode error. +#[derive(Error, Debug)] +pub enum DecodeError { + #[error("unexpected end of buffer")] + UnexpectedEnd, + + #[error("invalid string")] + InvalidString(#[from] str::Utf8Error), + + #[error("invalid message: {0:?}")] + InvalidMessage(VarInt), + + #[error("invalid role: {0:?}")] + InvalidRole(VarInt), + + #[error("invalid subscribe location")] + InvalidSubscribeLocation, + + #[error("varint bounds exceeded")] + BoundsExceeded(#[from] BoundsExceeded), + + // TODO move these to ParamError + #[error("duplicate parameter")] + DupliateParameter, + + #[error("missing parameter")] + MissingParameter, + + #[error("invalid parameter")] + InvalidParameter, + + #[error("io error: {0}")] + IoError(#[from] std::io::Error), + + // Used to signal that the stream has ended. + #[error("no more messages")] + Final, +} diff --git a/moq-transport/src/coding/encode.rs b/moq-transport/src/coding/encode.rs new file mode 100644 index 0000000..b03cdb9 --- /dev/null +++ b/moq-transport/src/coding/encode.rs @@ -0,0 +1,27 @@ +use super::BoundsExceeded; + +use thiserror::Error; + +// I'm too lazy to add these trait bounds to every message type. +// TODO Use trait aliases when they're stable, or add these bounds to every method. +pub trait AsyncWrite: tokio::io::AsyncWrite + Unpin + Send {} +impl AsyncWrite for webtransport_quinn::SendStream {} +impl AsyncWrite for Vec {} + +#[async_trait::async_trait] +pub trait Encode: Sized { + async fn encode(&self, w: &mut W) -> Result<(), EncodeError>; +} + +/// An encode error. 
+#[derive(Error, Debug)] +pub enum EncodeError { + #[error("varint too large")] + BoundsExceeded(#[from] BoundsExceeded), + + #[error("invalid value")] + InvalidValue, + + #[error("i/o error: {0}")] + IoError(#[from] std::io::Error), +} diff --git a/moq-transport/src/coding/mod.rs b/moq-transport/src/coding/mod.rs new file mode 100644 index 0000000..ff57b4c --- /dev/null +++ b/moq-transport/src/coding/mod.rs @@ -0,0 +1,11 @@ +mod decode; +mod encode; +mod params; +mod string; +mod varint; + +pub use decode::*; +pub use encode::*; +pub use params::*; +pub use string::*; +pub use varint::*; diff --git a/moq-transport/src/coding/params.rs b/moq-transport/src/coding/params.rs new file mode 100644 index 0000000..9cfd6f3 --- /dev/null +++ b/moq-transport/src/coding/params.rs @@ -0,0 +1,85 @@ +use std::io::Cursor; +use std::{cmp::max, collections::HashMap}; + +use tokio::io::{AsyncReadExt, AsyncWriteExt}; + +use crate::coding::{AsyncRead, AsyncWrite, Decode, Encode}; + +use crate::{ + coding::{DecodeError, EncodeError}, + VarInt, +}; + +#[derive(Default, Debug, Clone)] +pub struct Params(pub HashMap>); + +#[async_trait::async_trait] +impl Decode for Params { + async fn decode(mut r: &mut R) -> Result { + let mut params = HashMap::new(); + + // I hate this shit so much; let me encode my role and get on with my life. + let count = VarInt::decode(r).await?; + for _ in 0..count.into_inner() { + let kind = VarInt::decode(r).await?; + if params.contains_key(&kind) { + return Err(DecodeError::DupliateParameter); + } + + let size = VarInt::decode(r).await?; + + // Don't allocate the entire requested size to avoid a possible attack + // Instead, we allocate up to 1024 and keep appending as we read further. 
+ let mut pr = r.take(size.into_inner()); + let mut buf = Vec::with_capacity(max(1024, pr.limit() as usize)); + pr.read_to_end(&mut buf).await?; + params.insert(kind, buf); + + r = pr.into_inner(); + } + + Ok(Params(params)) + } +} + +#[async_trait::async_trait] +impl Encode for Params { + async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + VarInt::try_from(self.0.len())?.encode(w).await?; + + for (kind, value) in self.0.iter() { + kind.encode(w).await?; + VarInt::try_from(value.len())?.encode(w).await?; + w.write_all(value).await?; + } + + Ok(()) + } +} + +impl Params { + pub fn new() -> Self { + Self::default() + } + + pub async fn set(&mut self, kind: VarInt, p: P) -> Result<(), EncodeError> { + let mut value = Vec::new(); + p.encode(&mut value).await?; + self.0.insert(kind, value); + + Ok(()) + } + + pub fn has(&self, kind: VarInt) -> bool { + self.0.contains_key(&kind) + } + + pub async fn get(&mut self, kind: VarInt) -> Result, DecodeError> { + if let Some(value) = self.0.remove(&kind) { + let mut cursor = Cursor::new(value); + Ok(Some(P::decode(&mut cursor).await?)) + } else { + Ok(None) + } + } +} diff --git a/moq-transport/src/coding/string.rs b/moq-transport/src/coding/string.rs new file mode 100644 index 0000000..2cdff4a --- /dev/null +++ b/moq-transport/src/coding/string.rs @@ -0,0 +1,29 @@ +use std::cmp::min; + +use crate::coding::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; + +use crate::VarInt; + +use super::{Decode, DecodeError, Encode, EncodeError}; + +#[async_trait::async_trait] +impl Encode for String { + async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + let size = VarInt::try_from(self.len())?; + size.encode(w).await?; + w.write_all(self.as_ref()).await?; + Ok(()) + } +} + +#[async_trait::async_trait] +impl Decode for String { + /// Decode a string with a varint length prefix. 
+ async fn decode(r: &mut R) -> Result { + let size = VarInt::decode(r).await?.into_inner(); + let mut str = String::with_capacity(min(1024, size) as usize); + r.take(size).read_to_string(&mut str).await?; + Ok(str) + } +} diff --git a/moq-transport/src/coding/varint.rs b/moq-transport/src/coding/varint.rs new file mode 100644 index 0000000..8557de8 --- /dev/null +++ b/moq-transport/src/coding/varint.rs @@ -0,0 +1,232 @@ +// Based on quinn-proto +// https://github.com/quinn-rs/quinn/blob/main/quinn-proto/src/varint.rs +// Licensed via Apache 2.0 and MIT + +use std::convert::{TryFrom, TryInto}; +use std::fmt; + +use crate::coding::{AsyncRead, AsyncWrite}; +use thiserror::Error; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; + +use super::{Decode, DecodeError, Encode, EncodeError}; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)] +#[error("value out of range")] +pub struct BoundsExceeded; + +/// An integer less than 2^62 +/// +/// Values of this type are suitable for encoding as QUIC variable-length integer. +// It would be neat if we could express to Rust that the top two bits are available for use as enum +// discriminants +#[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct VarInt(u64); + +impl VarInt { + /// The largest possible value. + pub const MAX: Self = Self((1 << 62) - 1); + + /// The smallest possible value. + pub const ZERO: Self = Self(0); + + /// Construct a `VarInt` infallibly using the largest available type. + /// Larger values need to use `try_from` instead. 
+ pub const fn from_u32(x: u32) -> Self { + Self(x as u64) + } + + /// Extract the integer value + pub const fn into_inner(self) -> u64 { + self.0 + } +} + +impl From for u64 { + fn from(x: VarInt) -> Self { + x.0 + } +} + +impl From for usize { + fn from(x: VarInt) -> Self { + x.0 as usize + } +} + +impl From for u128 { + fn from(x: VarInt) -> Self { + x.0 as u128 + } +} + +impl From for VarInt { + fn from(x: u8) -> Self { + Self(x.into()) + } +} + +impl From for VarInt { + fn from(x: u16) -> Self { + Self(x.into()) + } +} + +impl From for VarInt { + fn from(x: u32) -> Self { + Self(x.into()) + } +} + +impl TryFrom for VarInt { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^62 + fn try_from(x: u64) -> Result { + if x <= Self::MAX.into_inner() { + Ok(Self(x)) + } else { + Err(BoundsExceeded) + } + } +} + +impl TryFrom for VarInt { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^62 + fn try_from(x: u128) -> Result { + if x <= Self::MAX.into() { + Ok(Self(x as u64)) + } else { + Err(BoundsExceeded) + } + } +} + +impl TryFrom for VarInt { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^62 + fn try_from(x: usize) -> Result { + Self::try_from(x as u64) + } +} + +impl TryFrom for u32 { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^32 + fn try_from(x: VarInt) -> Result { + if x.0 <= u32::MAX.into() { + Ok(x.0 as u32) + } else { + Err(BoundsExceeded) + } + } +} + +impl TryFrom for u16 { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^16 + fn try_from(x: VarInt) -> Result { + if x.0 <= u16::MAX.into() { + Ok(x.0 as u16) + } else { + Err(BoundsExceeded) + } + } +} + +impl TryFrom for u8 { + type Error = BoundsExceeded; + + /// Succeeds iff `x` < 2^8 + fn try_from(x: VarInt) -> Result { + if x.0 <= u8::MAX.into() { + Ok(x.0 as u8) + } else { + Err(BoundsExceeded) + } + } +} + +impl fmt::Debug for VarInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for 
VarInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +#[async_trait::async_trait] +impl Decode for VarInt { + /// Decode a varint from the given reader. + async fn decode(r: &mut R) -> Result { + let b = r.read_u8().await?; + Self::decode_byte(b, r).await + } +} + +impl VarInt { + /// Decode a varint given the first byte, reading the rest as needed. + /// This is silly but useful for determining if the stream has ended. + pub async fn decode_byte(b: u8, r: &mut R) -> Result { + let tag = b >> 6; + + let mut buf = [0u8; 8]; + buf[0] = b & 0b0011_1111; + + let x = match tag { + 0b00 => u64::from(buf[0]), + 0b01 => { + r.read_exact(buf[1..2].as_mut()).await?; + u64::from(u16::from_be_bytes(buf[..2].try_into().unwrap())) + } + 0b10 => { + r.read_exact(buf[1..4].as_mut()).await?; + u64::from(u32::from_be_bytes(buf[..4].try_into().unwrap())) + } + 0b11 => { + r.read_exact(buf[1..8].as_mut()).await?; + u64::from_be_bytes(buf) + } + _ => unreachable!(), + }; + + Ok(Self(x)) + } +} + +#[async_trait::async_trait] +impl Encode for VarInt { + /// Encode a varint to the given writer. + async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + let x = self.0; + if x < 2u64.pow(6) { + w.write_u8(x as u8).await?; + } else if x < 2u64.pow(14) { + w.write_u16(0b01 << 14 | x as u16).await?; + } else if x < 2u64.pow(30) { + w.write_u32(0b10 << 30 | x as u32).await?; + } else if x < 2u64.pow(62) { + w.write_u64(0b11 << 62 | x).await?; + } else { + unreachable!("malformed VarInt"); + } + + Ok(()) + } +} + +// This is a fork of quinn::VarInt. +impl From for VarInt { + fn from(v: quinn::VarInt) -> Self { + Self(v.into_inner()) + } +} diff --git a/moq-transport/src/error.rs b/moq-transport/src/error.rs new file mode 100644 index 0000000..d070251 --- /dev/null +++ b/moq-transport/src/error.rs @@ -0,0 +1,7 @@ +pub trait MoqError { + /// An integer code that is sent over the wire. 
+ fn code(&self) -> u32;
+
+ /// An optional reason sometimes sent over the wire.
+ fn reason(&self) -> String;
+}
diff --git a/moq-transport/src/lib.rs b/moq-transport/src/lib.rs
new file mode 100644
index 0000000..08f4485
--- /dev/null
+++ b/moq-transport/src/lib.rs
@@ -0,0 +1,18 @@
+//! An implementation of the MoQ Transport protocol.
+//!
+//! MoQ Transport is a pub/sub protocol over QUIC.
+//! While originally designed for live media, MoQ Transport is generic and can be used for other live applications.
+//! The specification is a work in progress and will change.
+//! See the [specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) and [github](https://github.com/moq-wg/moq-transport) for any updates.
+//!
+//! This implementation has some required extensions until the draft stabilizes. See: [Extensions](crate::setup::Extensions)
+mod coding;
+mod error;
+
+pub mod cache;
+pub mod message;
+pub mod session;
+pub mod setup;
+
+pub use coding::VarInt;
+pub use error::MoqError;
diff --git a/moq-transport/src/message/announce.rs b/moq-transport/src/message/announce.rs
new file mode 100644
index 0000000..281fffa
--- /dev/null
+++ b/moq-transport/src/message/announce.rs
@@ -0,0 +1,30 @@
+use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params};
+
+use crate::coding::{AsyncRead, AsyncWrite};
+use crate::setup::Extensions;
+
+/// Sent by the publisher to announce the availability of a group of tracks.
+#[derive(Clone, Debug)] +pub struct Announce { + /// The track namespace + pub namespace: String, + + /// Optional parameters + pub params: Params, +} + +impl Announce { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let namespace = String::decode(r).await?; + let params = Params::decode(r).await?; + + Ok(Self { namespace, params }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.namespace.encode(w).await?; + self.params.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/announce_ok.rs b/moq-transport/src/message/announce_ok.rs new file mode 100644 index 0000000..300279e --- /dev/null +++ b/moq-transport/src/message/announce_ok.rs @@ -0,0 +1,23 @@ +use crate::{ + coding::{AsyncRead, AsyncWrite, Decode, DecodeError, Encode, EncodeError}, + setup::Extensions, +}; + +/// Sent by the subscriber to accept an Announce. +#[derive(Clone, Debug)] +pub struct AnnounceOk { + // Echo back the namespace that was announced. + // TODO Propose using an ID to save bytes. + pub namespace: String, +} + +impl AnnounceOk { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let namespace = String::decode(r).await?; + Ok(Self { namespace }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.namespace.encode(w).await + } +} diff --git a/moq-transport/src/message/announce_reset.rs b/moq-transport/src/message/announce_reset.rs new file mode 100644 index 0000000..24d3f81 --- /dev/null +++ b/moq-transport/src/message/announce_reset.rs @@ -0,0 +1,39 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the subscriber to reject an Announce. +#[derive(Clone, Debug)] +pub struct AnnounceError { + // Echo back the namespace that was reset + pub namespace: String, + + // An error code. 
+ pub code: u32, + + // An optional, human-readable reason. + pub reason: String, +} + +impl AnnounceError { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let namespace = String::decode(r).await?; + let code = VarInt::decode(r).await?.try_into()?; + let reason = String::decode(r).await?; + + Ok(Self { + namespace, + code, + reason, + }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.namespace.encode(w).await?; + VarInt::from_u32(self.code).encode(w).await?; + self.reason.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/go_away.rs b/moq-transport/src/message/go_away.rs new file mode 100644 index 0000000..7999c9a --- /dev/null +++ b/moq-transport/src/message/go_away.rs @@ -0,0 +1,21 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the server to indicate that the client should connect to a different server. +#[derive(Clone, Debug)] +pub struct GoAway { + pub url: String, +} + +impl GoAway { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let url = String::decode(r).await?; + Ok(Self { url }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.url.encode(w).await + } +} diff --git a/moq-transport/src/message/mod.rs b/moq-transport/src/message/mod.rs new file mode 100644 index 0000000..d32a936 --- /dev/null +++ b/moq-transport/src/message/mod.rs @@ -0,0 +1,160 @@ +//! Low-level message sent over the wire, as defined in the specification. +//! +//! All of these messages are sent over a bidirectional QUIC stream. +//! This introduces some head-of-line blocking but preserves ordering. +//! The only exception are OBJECT "messages", which are sent over dedicated QUIC streams. +//! +//! Messages sent by the publisher: +//! - [Announce] +//! - [Unannounce] +//! - [SubscribeOk] +//! 
- [SubscribeError] +//! - [SubscribeReset] +//! - [Object] +//! +//! Messages sent by the subscriber: +//! - [Subscribe] +//! - [Unsubscribe] +//! - [AnnounceOk] +//! - [AnnounceError] +//! +//! Example flow: +//! ```test +//! -> ANNOUNCE namespace="foo" +//! <- ANNOUNCE_OK namespace="foo" +//! <- SUBSCRIBE id=0 namespace="foo" name="bar" +//! -> SUBSCRIBE_OK id=0 +//! -> OBJECT id=0 sequence=69 priority=4 expires=30 +//! -> OBJECT id=0 sequence=70 priority=4 expires=30 +//! -> OBJECT id=0 sequence=70 priority=4 expires=30 +//! <- SUBSCRIBE_STOP id=0 +//! -> SUBSCRIBE_RESET id=0 code=206 reason="closed by peer" +//! ``` +mod announce; +mod announce_ok; +mod announce_reset; +mod go_away; +mod object; +mod subscribe; +mod subscribe_error; +mod subscribe_fin; +mod subscribe_ok; +mod subscribe_reset; +mod unannounce; +mod unsubscribe; + +pub use announce::*; +pub use announce_ok::*; +pub use announce_reset::*; +pub use go_away::*; +pub use object::*; +pub use subscribe::*; +pub use subscribe_error::*; +pub use subscribe_fin::*; +pub use subscribe_ok::*; +pub use subscribe_reset::*; +pub use unannounce::*; +pub use unsubscribe::*; + +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +use std::fmt; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +// Use a macro to generate the message types rather than copy-paste. +// This implements a decode/encode method that uses the specified type. +macro_rules! message_types { + {$($name:ident = $val:expr,)*} => { + /// All supported message types. 
+ #[derive(Clone)] + pub enum Message { + $($name($name)),* + } + + impl Message { + pub async fn decode(r: &mut R, ext: &Extensions) -> Result { + let t = VarInt::decode(r).await?; + + match t.into_inner() { + $($val => { + let msg = $name::decode(r, ext).await?; + Ok(Self::$name(msg)) + })* + _ => Err(DecodeError::InvalidMessage(t)), + } + } + + pub async fn encode(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> { + match self { + $(Self::$name(ref m) => { + VarInt::from_u32($val).encode(w).await?; + m.encode(w, ext).await + },)* + } + } + + pub fn id(&self) -> VarInt { + match self { + $(Self::$name(_) => { + VarInt::from_u32($val) + },)* + } + } + + pub fn name(&self) -> &'static str { + match self { + $(Self::$name(_) => { + stringify!($name) + },)* + } + } + } + + $(impl From<$name> for Message { + fn from(m: $name) -> Self { + Message::$name(m) + } + })* + + impl fmt::Debug for Message { + // Delegate to the message formatter + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + $(Self::$name(ref m) => m.fmt(f),)* + } + } + } + } +} + +// Each message is prefixed with the given VarInt type. +message_types! { + // NOTE: Object and Setup are in other modules. 
+ // Object = 0x0 + // ObjectUnbounded = 0x2 + // SetupClient = 0x40 + // SetupServer = 0x41 + + // SUBSCRIBE family, sent by subscriber + Subscribe = 0x3, + Unsubscribe = 0xa, + + // SUBSCRIBE family, sent by publisher + SubscribeOk = 0x4, + SubscribeError = 0x5, + SubscribeFin = 0xb, + SubscribeReset = 0xc, + + // ANNOUNCE family, sent by publisher + Announce = 0x6, + Unannounce = 0x9, + + // ANNOUNCE family, sent by subscriber + AnnounceOk = 0x7, + AnnounceError = 0x8, + + // Misc + GoAway = 0x10, +} diff --git a/moq-transport/src/message/object.rs b/moq-transport/src/message/object.rs new file mode 100644 index 0000000..90efa23 --- /dev/null +++ b/moq-transport/src/message/object.rs @@ -0,0 +1,108 @@ +use std::{io, time}; + +use tokio::io::AsyncReadExt; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; +use crate::setup; + +/// Sent by the publisher as the header of each data stream. +#[derive(Clone, Debug)] +pub struct Object { + // An ID for this track. + // Proposal: https://github.com/moq-wg/moq-transport/issues/209 + pub track: VarInt, + + // The sequence number within the track. + pub group: VarInt, + + // The sequence number within the group. + pub sequence: VarInt, + + // The priority, where **smaller** values are sent first. + pub priority: u32, + + // Cache the object for at most this many seconds. + // Zero means never expire. + pub expires: Option, + + /// An optional size, allowing multiple OBJECTs on the same stream. + pub size: Option, +} + +impl Object { + pub async fn decode(r: &mut R, extensions: &setup::Extensions) -> Result { + // Try reading the first byte, returning a special error if the stream naturally ended. 
+ let typ = match r.read_u8().await { + Ok(b) => VarInt::decode_byte(b, r).await?, + Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Err(DecodeError::Final), + Err(e) => return Err(e.into()), + }; + + let size_present = match typ.into_inner() { + 0 => false, + 2 => true, + _ => return Err(DecodeError::InvalidMessage(typ)), + }; + + let track = VarInt::decode(r).await?; + let group = VarInt::decode(r).await?; + let sequence = VarInt::decode(r).await?; + let priority = VarInt::decode(r).await?.try_into()?; + + let expires = match extensions.object_expires { + true => match VarInt::decode(r).await?.into_inner() { + 0 => None, + secs => Some(time::Duration::from_secs(secs)), + }, + false => None, + }; + + // The presence of the size field depends on the type. + let size = match size_present { + true => Some(VarInt::decode(r).await?), + false => None, + }; + + Ok(Self { + track, + group, + sequence, + priority, + expires, + size, + }) + } + + pub async fn encode(&self, w: &mut W, extensions: &setup::Extensions) -> Result<(), EncodeError> { + // The kind changes based on the presence of the size. + let kind = match self.size { + Some(_) => VarInt::from_u32(2), + None => VarInt::ZERO, + }; + + kind.encode(w).await?; + self.track.encode(w).await?; + self.group.encode(w).await?; + self.sequence.encode(w).await?; + VarInt::from_u32(self.priority).encode(w).await?; + + // Round up if there's any decimal points. + let expires = match self.expires { + None => 0, + Some(time::Duration::ZERO) => return Err(EncodeError::InvalidValue), // there's no way of expressing zero currently. 
+ Some(expires) if expires.subsec_nanos() > 0 => expires.as_secs() + 1, + Some(expires) => expires.as_secs(), + }; + + if extensions.object_expires { + VarInt::try_from(expires)?.encode(w).await?; + } + + if let Some(size) = self.size { + size.encode(w).await?; + } + + Ok(()) + } +} diff --git a/moq-transport/src/message/subscribe.rs b/moq-transport/src/message/subscribe.rs new file mode 100644 index 0000000..e64d5a1 --- /dev/null +++ b/moq-transport/src/message/subscribe.rs @@ -0,0 +1,142 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params, VarInt}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the subscriber to request all future objects for the given track. +/// +/// Objects will use the provided ID instead of the full track name, to save bytes. +#[derive(Clone, Debug)] +pub struct Subscribe { + /// An ID we choose so we can map to the track_name. + // Proposal: https://github.com/moq-wg/moq-transport/issues/209 + pub id: VarInt, + + /// The track namespace. + /// + /// Must be None if `extensions.subscribe_split` is false. + pub namespace: Option, + + /// The track name. + pub name: String, + + /// The start/end group/object. + pub start_group: SubscribeLocation, + pub start_object: SubscribeLocation, + pub end_group: SubscribeLocation, + pub end_object: SubscribeLocation, + + /// Optional parameters + pub params: Params, +} + +impl Subscribe { + pub async fn decode(r: &mut R, ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + + let namespace = match ext.subscribe_split { + true => Some(String::decode(r).await?), + false => None, + }; + + let name = String::decode(r).await?; + + let start_group = SubscribeLocation::decode(r).await?; + let start_object = SubscribeLocation::decode(r).await?; + let end_group = SubscribeLocation::decode(r).await?; + let end_object = SubscribeLocation::decode(r).await?; + + // You can't have a start object without a start group. 
+ if start_group == SubscribeLocation::None && start_object != SubscribeLocation::None { + return Err(DecodeError::InvalidSubscribeLocation); + } + + // You can't have an end object without an end group. + if end_group == SubscribeLocation::None && end_object != SubscribeLocation::None { + return Err(DecodeError::InvalidSubscribeLocation); + } + + // NOTE: There's some more location restrictions in the draft, but they're enforced at a higher level. + + let params = Params::decode(r).await?; + + Ok(Self { + id, + namespace, + name, + start_group, + start_object, + end_group, + end_object, + params, + }) + } + + pub async fn encode(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + + if self.namespace.is_some() != ext.subscribe_split { + panic!("namespace must be None if subscribe_split is false"); + } + + if ext.subscribe_split { + self.namespace.as_ref().unwrap().encode(w).await?; + } + + self.name.encode(w).await?; + + self.start_group.encode(w).await?; + self.start_object.encode(w).await?; + self.end_group.encode(w).await?; + self.end_object.encode(w).await?; + + self.params.encode(w).await?; + + Ok(()) + } +} + +/// Signal where the subscription should begin, relative to the current cache. 
+#[derive(Clone, Debug, PartialEq)] +pub enum SubscribeLocation { + None, + Absolute(VarInt), + Latest(VarInt), + Future(VarInt), +} + +impl SubscribeLocation { + pub async fn decode(r: &mut R) -> Result { + let kind = VarInt::decode(r).await?; + + match kind.into_inner() { + 0 => Ok(Self::None), + 1 => Ok(Self::Absolute(VarInt::decode(r).await?)), + 2 => Ok(Self::Latest(VarInt::decode(r).await?)), + 3 => Ok(Self::Future(VarInt::decode(r).await?)), + _ => Err(DecodeError::InvalidSubscribeLocation), + } + } + + pub async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + match self { + Self::None => { + VarInt::from_u32(0).encode(w).await?; + } + Self::Absolute(val) => { + VarInt::from_u32(1).encode(w).await?; + val.encode(w).await?; + } + Self::Latest(val) => { + VarInt::from_u32(2).encode(w).await?; + val.encode(w).await?; + } + Self::Future(val) => { + VarInt::from_u32(3).encode(w).await?; + val.encode(w).await?; + } + } + + Ok(()) + } +} diff --git a/moq-transport/src/message/subscribe_error.rs b/moq-transport/src/message/subscribe_error.rs new file mode 100644 index 0000000..9ef4c91 --- /dev/null +++ b/moq-transport/src/message/subscribe_error.rs @@ -0,0 +1,36 @@ +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; +use crate::setup::Extensions; + +/// Sent by the publisher to reject a Subscribe. +#[derive(Clone, Debug)] +pub struct SubscribeError { + // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 + + // The ID for this subscription. + pub id: VarInt, + + // An error code. + pub code: u32, + + // An optional, human-readable reason. 
+ pub reason: String, +} + +impl SubscribeError { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + let code = VarInt::decode(r).await?.try_into()?; + let reason = String::decode(r).await?; + + Ok(Self { id, code, reason }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + VarInt::from_u32(self.code).encode(w).await?; + self.reason.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/subscribe_fin.rs b/moq-transport/src/message/subscribe_fin.rs new file mode 100644 index 0000000..b070971 --- /dev/null +++ b/moq-transport/src/message/subscribe_fin.rs @@ -0,0 +1,37 @@ +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; +use crate::setup::Extensions; + +/// Sent by the publisher to cleanly terminate a Subscribe. +#[derive(Clone, Debug)] +pub struct SubscribeFin { + // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 + /// The ID for this subscription. + pub id: VarInt, + + /// The final group/object sent on this subscription. 
+ pub final_group: VarInt, + pub final_object: VarInt, +} + +impl SubscribeFin { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + let final_group = VarInt::decode(r).await?; + let final_object = VarInt::decode(r).await?; + + Ok(Self { + id, + final_group, + final_object, + }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + self.final_group.encode(w).await?; + self.final_object.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/subscribe_ok.rs b/moq-transport/src/message/subscribe_ok.rs new file mode 100644 index 0000000..11864e6 --- /dev/null +++ b/moq-transport/src/message/subscribe_ok.rs @@ -0,0 +1,31 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the publisher to accept a Subscribe. +#[derive(Clone, Debug)] +pub struct SubscribeOk { + // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 + /// The ID for this track. + pub id: VarInt, + + /// The subscription will expire in this many milliseconds. 
+ pub expires: VarInt, +} + +impl SubscribeOk { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + let expires = VarInt::decode(r).await?; + Ok(Self { id, expires }) + } +} + +impl SubscribeOk { + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + self.expires.encode(w).await?; + Ok(()) + } +} diff --git a/moq-transport/src/message/subscribe_reset.rs b/moq-transport/src/message/subscribe_reset.rs new file mode 100644 index 0000000..e488b28 --- /dev/null +++ b/moq-transport/src/message/subscribe_reset.rs @@ -0,0 +1,50 @@ +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; +use crate::setup::Extensions; + +/// Sent by the publisher to terminate a Subscribe. +#[derive(Clone, Debug)] +pub struct SubscribeReset { + // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 + /// The ID for this subscription. + pub id: VarInt, + + /// An error code. + pub code: u32, + + /// An optional, human-readable reason. + pub reason: String, + + /// The final group/object sent on this subscription. 
+ pub final_group: VarInt, + pub final_object: VarInt, +} + +impl SubscribeReset { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + let code = VarInt::decode(r).await?.try_into()?; + let reason = String::decode(r).await?; + let final_group = VarInt::decode(r).await?; + let final_object = VarInt::decode(r).await?; + + Ok(Self { + id, + code, + reason, + final_group, + final_object, + }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + VarInt::from_u32(self.code).encode(w).await?; + self.reason.encode(w).await?; + + self.final_group.encode(w).await?; + self.final_object.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/unannounce.rs b/moq-transport/src/message/unannounce.rs new file mode 100644 index 0000000..a2c2e39 --- /dev/null +++ b/moq-transport/src/message/unannounce.rs @@ -0,0 +1,25 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the publisher to terminate an Announce. 
+#[derive(Clone, Debug)] +pub struct Unannounce { + // Echo back the namespace that was reset + pub namespace: String, +} + +impl Unannounce { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let namespace = String::decode(r).await?; + + Ok(Self { namespace }) + } + + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.namespace.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/message/unsubscribe.rs b/moq-transport/src/message/unsubscribe.rs new file mode 100644 index 0000000..5361f59 --- /dev/null +++ b/moq-transport/src/message/unsubscribe.rs @@ -0,0 +1,27 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +use crate::coding::{AsyncRead, AsyncWrite}; +use crate::setup::Extensions; + +/// Sent by the subscriber to terminate a Subscribe. +#[derive(Clone, Debug)] +pub struct Unsubscribe { + // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 + + // The ID for this subscription. + pub id: VarInt, +} + +impl Unsubscribe { + pub async fn decode(r: &mut R, _ext: &Extensions) -> Result { + let id = VarInt::decode(r).await?; + Ok(Self { id }) + } +} + +impl Unsubscribe { + pub async fn encode(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { + self.id.encode(w).await?; + Ok(()) + } +} diff --git a/moq-transport/src/session/client.rs b/moq-transport/src/session/client.rs new file mode 100644 index 0000000..243fe4e --- /dev/null +++ b/moq-transport/src/session/client.rs @@ -0,0 +1,76 @@ +use super::{Control, Publisher, SessionError, Subscriber}; +use crate::{cache::broadcast, setup}; +use webtransport_quinn::Session; + +/// An endpoint that connects to a URL to publish and/or consume live streams. +pub struct Client {} + +impl Client { + /// Connect using an established WebTransport session, performing the MoQ handshake as a publisher. 
+ pub async fn publisher(session: Session, source: broadcast::Subscriber) -> Result { + let control = Self::send_setup(&session, setup::Role::Publisher).await?; + let publisher = Publisher::new(session, control, source); + Ok(publisher) + } + + /// Connect using an established WebTransport session, performing the MoQ handshake as a subscriber. + pub async fn subscriber(session: Session, source: broadcast::Publisher) -> Result { + let control = Self::send_setup(&session, setup::Role::Subscriber).await?; + let subscriber = Subscriber::new(session, control, source); + Ok(subscriber) + } + + // TODO support performing both roles + /* + pub async fn connect(self) -> anyhow::Result<(Publisher, Subscriber)> { + self.connect_role(setup::Role::Both).await + } + */ + + async fn send_setup(session: &Session, role: setup::Role) -> Result { + let mut control = session.open_bi().await?; + + let versions: setup::Versions = [setup::Version::DRAFT_01, setup::Version::KIXEL_01].into(); + + let client = setup::Client { + role, + versions: versions.clone(), + params: Default::default(), + + // Offer all extensions + extensions: setup::Extensions { + object_expires: true, + subscriber_id: true, + subscribe_split: true, + }, + }; + + log::debug!("sending client SETUP: {:?}", client); + client.encode(&mut control.0).await?; + + let mut server = setup::Server::decode(&mut control.1).await?; + + log::debug!("received server SETUP: {:?}", server); + + match server.version { + setup::Version::DRAFT_01 => { + // We always require this extension + server.extensions.require_subscriber_id()?; + + if server.role.is_publisher() { + // We only require object expires if we're a subscriber, so we don't cache objects indefinitely. + server.extensions.require_object_expires()?; + } + } + setup::Version::KIXEL_01 => { + // KIXEL_01 didn't support extensions; all were enabled. 
+ server.extensions = client.extensions.clone() + } + _ => return Err(SessionError::Version(versions, [server.version].into())), + } + + let control = Control::new(control.0, control.1, server.extensions); + + Ok(control) + } +} diff --git a/moq-transport/src/session/control.rs b/moq-transport/src/session/control.rs new file mode 100644 index 0000000..0686650 --- /dev/null +++ b/moq-transport/src/session/control.rs @@ -0,0 +1,45 @@ +// A helper class to guard sending control messages behind a Mutex. + +use std::{fmt, sync::Arc}; + +use tokio::sync::Mutex; +use webtransport_quinn::{RecvStream, SendStream}; + +use super::SessionError; +use crate::{message::Message, setup::Extensions}; + +#[derive(Debug, Clone)] +pub(crate) struct Control { + send: Arc>, + recv: Arc>, + pub ext: Extensions, +} + +impl Control { + pub fn new(send: SendStream, recv: RecvStream, ext: Extensions) -> Self { + Self { + send: Arc::new(Mutex::new(send)), + recv: Arc::new(Mutex::new(recv)), + ext, + } + } + + pub async fn send + fmt::Debug>(&self, msg: T) -> Result<(), SessionError> { + let mut stream = self.send.lock().await; + log::info!("sending message: {:?}", msg); + msg.into() + .encode(&mut *stream, &self.ext) + .await + .map_err(|e| SessionError::Unknown(e.to_string()))?; + Ok(()) + } + + // It's likely a mistake to call this from two different tasks, but it's easier to just support it. 
+ pub async fn recv(&self) -> Result { + let mut stream = self.recv.lock().await; + let msg = Message::decode(&mut *stream, &self.ext) + .await + .map_err(|e| SessionError::Unknown(e.to_string()))?; + Ok(msg) + } +} diff --git a/moq-transport/src/session/error.rs b/moq-transport/src/session/error.rs new file mode 100644 index 0000000..228a4c8 --- /dev/null +++ b/moq-transport/src/session/error.rs @@ -0,0 +1,107 @@ +use crate::{cache, coding, setup, MoqError, VarInt}; + +#[derive(thiserror::Error, Debug)] +pub enum SessionError { + #[error("webtransport error: {0}")] + Session(#[from] webtransport_quinn::SessionError), + + #[error("cache error: {0}")] + Cache(#[from] cache::CacheError), + + #[error("encode error: {0}")] + Encode(#[from] coding::EncodeError), + + #[error("decode error: {0}")] + Decode(#[from] coding::DecodeError), + + #[error("unsupported versions: client={0:?} server={1:?}")] + Version(setup::Versions, setup::Versions), + + #[error("incompatible roles: client={0:?} server={1:?}")] + RoleIncompatible(setup::Role, setup::Role), + + /// An error occured while reading from the QUIC stream. + #[error("failed to read from stream: {0}")] + Read(#[from] webtransport_quinn::ReadError), + + /// An error occured while writing to the QUIC stream. + #[error("failed to write to stream: {0}")] + Write(#[from] webtransport_quinn::WriteError), + + /// The role negiotiated in the handshake was violated. For example, a publisher sent a SUBSCRIBE, or a subscriber sent an OBJECT. + #[error("role violation: msg={0}")] + RoleViolation(VarInt), + + /// Our enforced stream mapping was disrespected. + #[error("stream mapping conflict")] + StreamMapping, + + /// The priority was invalid. + #[error("invalid priority: {0}")] + InvalidPriority(VarInt), + + /// The size was invalid. + #[error("invalid size: {0}")] + InvalidSize(VarInt), + + /// A required extension was not offered. 
+ #[error("required extension not offered: {0:?}")] + RequiredExtension(VarInt), + + /// Some VarInt was too large and we were too lazy to handle it + #[error("varint bounds exceeded")] + BoundsExceeded(#[from] coding::BoundsExceeded), + + /// An unclassified error because I'm lazy. TODO classify these errors + #[error("unknown error: {0}")] + Unknown(String), +} + +impl MoqError for SessionError { + /// An integer code that is sent over the wire. + fn code(&self) -> u32 { + match self { + Self::Cache(err) => err.code(), + Self::RoleIncompatible(..) => 406, + Self::RoleViolation(..) => 405, + Self::StreamMapping => 409, + Self::Unknown(_) => 500, + Self::Write(_) => 501, + Self::Read(_) => 502, + Self::Session(_) => 503, + Self::Version(..) => 406, + Self::Encode(_) => 500, + Self::Decode(_) => 500, + Self::InvalidPriority(_) => 400, + Self::InvalidSize(_) => 400, + Self::RequiredExtension(_) => 426, + Self::BoundsExceeded(_) => 500, + } + } + + /// A reason that is sent over the wire. + fn reason(&self) -> String { + match self { + Self::Cache(err) => err.reason(), + Self::RoleViolation(kind) => format!("role violation for message type {:?}", kind), + Self::RoleIncompatible(client, server) => { + format!( + "role incompatible: client wanted {:?} but server wanted {:?}", + client, server + ) + } + Self::Read(err) => format!("read error: {}", err), + Self::Write(err) => format!("write error: {}", err), + Self::Session(err) => format!("session error: {}", err), + Self::Unknown(err) => format!("unknown error: {}", err), + Self::Version(client, server) => format!("unsupported versions: client={:?} server={:?}", client, server), + Self::Encode(err) => format!("encode error: {}", err), + Self::Decode(err) => format!("decode error: {}", err), + Self::StreamMapping => "streaming mapping conflict".to_owned(), + Self::InvalidPriority(priority) => format!("invalid priority: {}", priority), + Self::InvalidSize(size) => format!("invalid size: {}", size), + 
Self::RequiredExtension(id) => format!("required extension was missing: {:?}", id), + Self::BoundsExceeded(_) => "varint bounds exceeded".to_string(), + } + } +} diff --git a/moq-transport/src/session/mod.rs b/moq-transport/src/session/mod.rs new file mode 100644 index 0000000..50b36a9 --- /dev/null +++ b/moq-transport/src/session/mod.rs @@ -0,0 +1,27 @@ +//! A MoQ Transport session, on top of a WebTransport session, on top of a QUIC connection. +//! +//! The handshake is relatively simple but split into different steps. +//! All of these handshakes slightly differ depending on if the endpoint is a client or server. +//! 1. Complete the QUIC handhake. +//! 2. Complete the WebTransport handshake. +//! 3. Complete the MoQ handshake. +//! +//! Use [Client] or [Server] for the MoQ handshake depending on the endpoint. +//! Then, decide if you want to create a [Publisher] or [Subscriber], or both (TODO). +//! +//! A [Publisher] can announce broadcasts, which will automatically be served over the network. +//! A [Subscriber] can subscribe to broadcasts, which will automatically be served over the network. + +mod client; +mod control; +mod error; +mod publisher; +mod server; +mod subscriber; + +pub use client::*; +pub(crate) use control::*; +pub use error::*; +pub use publisher::*; +pub use server::*; +pub use subscriber::*; diff --git a/moq-transport/src/session/publisher.rs b/moq-transport/src/session/publisher.rs new file mode 100644 index 0000000..cf19bd3 --- /dev/null +++ b/moq-transport/src/session/publisher.rs @@ -0,0 +1,237 @@ +use std::{ + collections::{hash_map, HashMap}, + sync::{Arc, Mutex}, +}; + +use tokio::task::AbortHandle; +use webtransport_quinn::Session; + +use crate::{ + cache::{broadcast, segment, track, CacheError}, + message, + message::Message, + MoqError, VarInt, +}; + +use super::{Control, SessionError}; + +/// Serves broadcasts over the network, automatically handling subscriptions and caching. 
+// TODO Clone specific fields when a task actually needs it. +#[derive(Clone, Debug)] +pub struct Publisher { + // A map of active subscriptions, containing an abort handle to cancel them. + subscribes: Arc>>, + webtransport: Session, + control: Control, + source: broadcast::Subscriber, +} + +impl Publisher { + pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Subscriber) -> Self { + Self { + webtransport, + control, + subscribes: Default::default(), + source, + } + } + + // TODO Serve a broadcast without sending an ANNOUNCE. + // fn serve(&mut self, broadcast: broadcast::Subscriber) -> Result<(), SessionError> { + + // TODO Wait until the next subscribe that doesn't route to an ANNOUNCE. + // pub async fn subscribed(&mut self) -> Result { + + pub async fn run(mut self) -> Result<(), SessionError> { + let res = self.run_inner().await; + + // Terminate all active subscribes on error. + self.subscribes + .lock() + .unwrap() + .drain() + .for_each(|(_, abort)| abort.abort()); + + res + } + + pub async fn run_inner(&mut self) -> Result<(), SessionError> { + loop { + tokio::select! { + stream = self.webtransport.accept_uni() => { + stream?; + return Err(SessionError::RoleViolation(VarInt::ZERO)); + } + // NOTE: this is not cancel safe, but it's fine since the other branchs are fatal. + msg = self.control.recv() => { + let msg = msg?; + + log::info!("message received: {:?}", msg); + if let Err(err) = self.recv_message(&msg).await { + log::warn!("message error: {:?} {:?}", err, msg); + } + }, + // No more broadcasts are available. 
+ err = self.source.closed() => { + self.webtransport.close(err.code(), err.reason().as_bytes()); + return Ok(()); + }, + } + } + } + + async fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> { + match msg { + Message::AnnounceOk(msg) => self.recv_announce_ok(msg).await, + Message::AnnounceError(msg) => self.recv_announce_error(msg).await, + Message::Subscribe(msg) => self.recv_subscribe(msg).await, + Message::Unsubscribe(msg) => self.recv_unsubscribe(msg).await, + _ => Err(SessionError::RoleViolation(msg.id())), + } + } + + async fn recv_announce_ok(&mut self, _msg: &message::AnnounceOk) -> Result<(), SessionError> { + // We didn't send an announce. + Err(CacheError::NotFound.into()) + } + + async fn recv_announce_error(&mut self, _msg: &message::AnnounceError) -> Result<(), SessionError> { + // We didn't send an announce. + Err(CacheError::NotFound.into()) + } + + async fn recv_subscribe(&mut self, msg: &message::Subscribe) -> Result<(), SessionError> { + // Assume that the subscribe ID is unique for now. + let abort = match self.start_subscribe(msg.clone()) { + Ok(abort) => abort, + Err(err) => return self.reset_subscribe(msg.id, err).await, + }; + + // Insert the abort handle into the lookup table. 
+ match self.subscribes.lock().unwrap().entry(msg.id) { + hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate.into()), // TODO fatal, because we already started the task + hash_map::Entry::Vacant(entry) => entry.insert(abort), + }; + + self.control + .send(message::SubscribeOk { + id: msg.id, + expires: VarInt::ZERO, + }) + .await + } + + async fn reset_subscribe(&mut self, id: VarInt, err: E) -> Result<(), SessionError> { + let msg = message::SubscribeReset { + id, + code: err.code(), + reason: err.reason(), + + // TODO properly populate these + // But first: https://github.com/moq-wg/moq-transport/issues/313 + final_group: VarInt::ZERO, + final_object: VarInt::ZERO, + }; + + self.control.send(msg).await + } + + fn start_subscribe(&mut self, msg: message::Subscribe) -> Result { + // We currently don't use the namespace field in SUBSCRIBE + // Make sure the namespace is empty if it's provided. + if msg.namespace.as_ref().map_or(false, |namespace| !namespace.is_empty()) { + return Err(CacheError::NotFound.into()); + } + + let mut track = self.source.get_track(&msg.name)?; + + // TODO only clone the fields we need + let mut this = self.clone(); + + let handle = tokio::spawn(async move { + log::info!("serving track: name={}", track.name); + + let res = this.run_subscribe(msg.id, &mut track).await; + if let Err(err) = &res { + log::warn!("failed to serve track: name={} err={:#?}", track.name, err); + } + + // Make sure we send a reset at the end. + let err = res.err().unwrap_or(CacheError::Closed.into()); + this.reset_subscribe(msg.id, err).await.ok(); + + // We're all done, so clean up the abort handle. + this.subscribes.lock().unwrap().remove(&msg.id); + }); + + Ok(handle.abort_handle()) + } + + async fn run_subscribe(&self, id: VarInt, track: &mut track::Subscriber) -> Result<(), SessionError> { + // TODO add an Ok method to track::Publisher so we can send SUBSCRIBE_OK + + while let Some(mut segment) = track.segment().await? 
{ + // TODO only clone the fields we need + let this = self.clone(); + + tokio::spawn(async move { + if let Err(err) = this.run_segment(id, &mut segment).await { + log::warn!("failed to serve segment: {:?}", err) + } + }); + } + + Ok(()) + } + + async fn run_segment(&self, id: VarInt, segment: &mut segment::Subscriber) -> Result<(), SessionError> { + log::trace!("serving group: {:?}", segment); + + let mut stream = self.webtransport.open_uni().await?; + + // Convert the u32 to a i32, since the Quinn set_priority is signed. + let priority = (segment.priority as i64 - i32::MAX as i64) as i32; + stream.set_priority(priority).ok(); + + while let Some(mut fragment) = segment.fragment().await? { + log::trace!("serving fragment: {:?}", fragment); + + let object = message::Object { + track: id, + + // Properties of the segment + group: segment.sequence, + priority: segment.priority, + expires: segment.expires, + + // Properties of the fragment + sequence: fragment.sequence, + size: fragment.size.map(VarInt::try_from).transpose()?, + }; + + object + .encode(&mut stream, &self.control.ext) + .await + .map_err(|e| SessionError::Unknown(e.to_string()))?; + + while let Some(chunk) = fragment.chunk().await? 
{ + //log::trace!("writing chunk: {:?}", chunk); + stream.write_all(&chunk).await?; + } + } + + Ok(()) + } + + async fn recv_unsubscribe(&mut self, msg: &message::Unsubscribe) -> Result<(), SessionError> { + let abort = self + .subscribes + .lock() + .unwrap() + .remove(&msg.id) + .ok_or(CacheError::NotFound)?; + abort.abort(); + + self.reset_subscribe(msg.id, CacheError::Stop).await + } +} diff --git a/moq-transport/src/session/server.rs b/moq-transport/src/session/server.rs new file mode 100644 index 0000000..215fe94 --- /dev/null +++ b/moq-transport/src/session/server.rs @@ -0,0 +1,116 @@ +use super::{Control, Publisher, SessionError, Subscriber}; +use crate::{cache::broadcast, setup}; + +use webtransport_quinn::{RecvStream, SendStream, Session}; + +/// An endpoint that accepts connections, publishing and/or consuming live streams. +pub struct Server {} + +impl Server { + /// Accept an established Webtransport session, performing the MoQ handshake. + /// + /// This returns a [Request] half-way through the handshake that allows the application to accept or deny the session. + pub async fn accept(session: Session) -> Result { + let mut control = session.accept_bi().await?; + + let mut client = setup::Client::decode(&mut control.1).await?; + + log::debug!("received client SETUP: {:?}", client); + + if client.versions.contains(&setup::Version::DRAFT_01) { + // We always require subscriber ID. + client.extensions.require_subscriber_id()?; + + // We require OBJECT_EXPIRES for publishers only. + if client.role.is_publisher() { + client.extensions.require_object_expires()?; + } + + // We don't require SUBSCRIBE_SPLIT since it's easy enough to support, but it's clearly an oversight. + // client.extensions.require(&Extension::SUBSCRIBE_SPLIT)?; + } else if client.versions.contains(&setup::Version::KIXEL_01) { + // Extensions didn't exist in KIXEL_01, so we set them manually. 
+ client.extensions = setup::Extensions { + object_expires: true, + subscriber_id: true, + subscribe_split: true, + }; + } else { + return Err(SessionError::Version( + client.versions, + [setup::Version::DRAFT_01, setup::Version::KIXEL_01].into(), + )); + } + + Ok(Request { + session, + client, + control, + }) + } +} + +/// A partially complete MoQ Transport handshake. +pub struct Request { + session: Session, + client: setup::Client, + control: (SendStream, RecvStream), +} + +impl Request { + /// Accept the session as a publisher, using the provided broadcast to serve subscriptions. + pub async fn publisher(mut self, source: broadcast::Subscriber) -> Result { + let setup = self.setup(setup::Role::Publisher)?; + setup.encode(&mut self.control.0).await?; + + let control = Control::new(self.control.0, self.control.1, setup.extensions); + let publisher = Publisher::new(self.session, control, source); + Ok(publisher) + } + + /// Accept the session as a subscriber only. + pub async fn subscriber(mut self, source: broadcast::Publisher) -> Result { + let setup = self.setup(setup::Role::Subscriber)?; + setup.encode(&mut self.control.0).await?; + + let control = Control::new(self.control.0, self.control.1, setup.extensions); + let subscriber = Subscriber::new(self.session, control, source); + Ok(subscriber) + } + + // TODO Accept the session and perform both roles. + /* + pub async fn accept(self) -> anyhow::Result<(Publisher, Subscriber)> { + self.ok(setup::Role::Both).await + } + */ + + fn setup(&mut self, role: setup::Role) -> Result { + let server = setup::Server { + role, + version: setup::Version::DRAFT_01, + extensions: self.client.extensions.clone(), + params: Default::default(), + }; + + log::debug!("sending server SETUP: {:?}", server); + + // We need to sure we support the opposite of the client's role. + // ex. if the client is a publisher, we must be a subscriber ONLY. 
+ if !self.client.role.is_compatible(server.role) { + return Err(SessionError::RoleIncompatible(self.client.role, server.role)); + } + + Ok(server) + } + + /// Reject the request, closing the Webtransport session. + pub fn reject(self, code: u32) { + self.session.close(code, b"") + } + + /// The role advertised by the client. + pub fn role(&self) -> setup::Role { + self.client.role + } +} diff --git a/moq-transport/src/session/subscriber.rs b/moq-transport/src/session/subscriber.rs new file mode 100644 index 0000000..02b5fbd --- /dev/null +++ b/moq-transport/src/session/subscriber.rs @@ -0,0 +1,211 @@ +use webtransport_quinn::{RecvStream, Session}; + +use std::{ + collections::HashMap, + sync::{atomic, Arc, Mutex}, +}; + +use crate::{ + cache::{broadcast, segment, track, CacheError}, + coding::DecodeError, + message, + message::Message, + session::{Control, SessionError}, + VarInt, +}; + +/// Receives broadcasts over the network, automatically handling subscriptions and caching. +// TODO Clone specific fields when a task actually needs it. +#[derive(Clone, Debug)] +pub struct Subscriber { + // The webtransport session. + webtransport: Session, + + // The list of active subscriptions, each guarded by an mutex. + subscribes: Arc>>, + + // The sequence number for the next subscription. + next: Arc, + + // A channel for sending messages. + control: Control, + + // All unknown subscribes comes here. + source: broadcast::Publisher, +} + +impl Subscriber { + pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Publisher) -> Self { + Self { + webtransport, + subscribes: Default::default(), + next: Default::default(), + control, + source, + } + } + + pub async fn run(self) -> Result<(), SessionError> { + let inbound = self.clone().run_inbound(); + let streams = self.clone().run_streams(); + let source = self.clone().run_source(); + + // Return the first error. + tokio::select! 
{ + res = inbound => res, + res = streams => res, + res = source => res, + } + } + + async fn run_inbound(mut self) -> Result<(), SessionError> { + loop { + let msg = self.control.recv().await?; + + log::info!("message received: {:?}", msg); + if let Err(err) = self.recv_message(&msg) { + log::warn!("message error: {:?} {:?}", err, msg); + } + } + } + + fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> { + match msg { + Message::Announce(_) => Ok(()), // don't care + Message::Unannounce(_) => Ok(()), // also don't care + Message::SubscribeOk(_msg) => Ok(()), // don't care + Message::SubscribeReset(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)), + Message::SubscribeFin(msg) => self.recv_subscribe_error(msg.id, CacheError::Closed), + Message::SubscribeError(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)), + Message::GoAway(_msg) => unimplemented!("GOAWAY"), + _ => Err(SessionError::RoleViolation(msg.id())), + } + } + + fn recv_subscribe_error(&mut self, id: VarInt, err: CacheError) -> Result<(), SessionError> { + let mut subscribes = self.subscribes.lock().unwrap(); + let subscribe = subscribes.remove(&id).ok_or(CacheError::NotFound)?; + subscribe.close(err)?; + + Ok(()) + } + + async fn run_streams(self) -> Result<(), SessionError> { + loop { + // Accept all incoming unidirectional streams. + let stream = self.webtransport.accept_uni().await?; + let this = self.clone(); + + tokio::spawn(async move { + if let Err(err) = this.run_stream(stream).await { + log::warn!("failed to receive stream: err={:#?}", err); + } + }); + } + } + + async fn run_stream(self, mut stream: RecvStream) -> Result<(), SessionError> { + // Decode the object on the data stream. 
+ let mut object = message::Object::decode(&mut stream, &self.control.ext) + .await + .map_err(|e| SessionError::Unknown(e.to_string()))?; + + log::trace!("first object: {:?}", object); + + // A new scope is needed because the async compiler is dumb + let mut segment = { + let mut subscribes = self.subscribes.lock().unwrap(); + let track = subscribes.get_mut(&object.track).ok_or(CacheError::NotFound)?; + + track.create_segment(segment::Info { + sequence: object.group, + priority: object.priority, + expires: object.expires, + })? + }; + + log::trace!("received segment: {:?}", segment); + + // Create the first fragment + let mut fragment = segment.push_fragment(object.sequence, object.size.map(usize::from))?; + let mut remain = object.size.map(usize::from); + + loop { + if let Some(0) = remain { + // Decode the next object from the stream. + let next = match message::Object::decode(&mut stream, &self.control.ext).await { + Ok(next) => next, + + // No more objects + Err(DecodeError::Final) => break, + + // Unknown error + Err(err) => return Err(err.into()), + }; + + log::trace!("next object: {:?}", object); + + // NOTE: This is a custom restriction; not part of the moq-transport draft. + // We require every OBJECT to contain the same priority since prioritization is done per-stream. + // We also require every OBJECT to contain the same group so we know when the group ends, and can detect gaps. + if next.priority != object.priority && next.group != object.group { + return Err(SessionError::StreamMapping); + } + + object = next; + + // Create a new object. + fragment = segment.push_fragment(object.sequence, object.size.map(usize::from))?; + remain = object.size.map(usize::from); + + log::trace!("next fragment: {:?}", fragment); + } + + match stream.read_chunk(remain.unwrap_or(usize::MAX), true).await? { + // Unbounded object has ended + None if remain.is_none() => break, + + // Bounded object ended early, oops. 
+ None => return Err(DecodeError::UnexpectedEnd.into()), + + // NOTE: This does not make a copy! + // Bytes are immutable and ref counted. + Some(data) => { + remain = remain.map(|r| r - data.bytes.len()); + + log::trace!("next chunk: {:?}", data); + fragment.chunk(data.bytes)?; + } + } + } + + Ok(()) + } + + async fn run_source(mut self) -> Result<(), SessionError> { + loop { + // NOTE: This returns Closed when the source is closed. + let track = self.source.next_track().await?; + let name = track.name.clone(); + + let id = VarInt::from_u32(self.next.fetch_add(1, atomic::Ordering::SeqCst)); + self.subscribes.lock().unwrap().insert(id, track); + + let msg = message::Subscribe { + id, + namespace: self.control.ext.subscribe_split.then(|| "".to_string()), + name, + + // TODO correctly support these + start_group: message::SubscribeLocation::Latest(VarInt::ZERO), + start_object: message::SubscribeLocation::Absolute(VarInt::ZERO), + end_group: message::SubscribeLocation::None, + end_object: message::SubscribeLocation::None, + + params: Default::default(), + }; + + self.control.send(msg).await?; + } + } +} diff --git a/moq-transport/src/setup/client.rs b/moq-transport/src/setup/client.rs new file mode 100644 index 0000000..a18eb7d --- /dev/null +++ b/moq-transport/src/setup/client.rs @@ -0,0 +1,72 @@ +use super::{Extensions, Role, Versions}; +use crate::{ + coding::{Decode, DecodeError, Encode, EncodeError, Params}, + VarInt, +}; + +use crate::coding::{AsyncRead, AsyncWrite}; + +/// Sent by the client to setup the session. +// NOTE: This is not a message type, but rather the control stream header. +// Proposal: https://github.com/moq-wg/moq-transport/issues/138 +#[derive(Debug)] +pub struct Client { + /// The list of supported versions in preferred order. + pub versions: Versions, + + /// Indicate if the client is a publisher, a subscriber, or both. + pub role: Role, + + /// A list of known/offered extensions. + pub extensions: Extensions, + + /// Unknown parameters. 
+ pub params: Params, +} + +impl Client { + /// Decode a client setup message. + pub async fn decode(r: &mut R) -> Result { + let typ = VarInt::decode(r).await?; + if typ.into_inner() != 0x40 { + return Err(DecodeError::InvalidMessage(typ)); + } + + let versions = Versions::decode(r).await?; + let mut params = Params::decode(r).await?; + + let role = params + .get::(VarInt::from_u32(0)) + .await? + .ok_or(DecodeError::MissingParameter)?; + + // Make sure the PATH parameter isn't used + // TODO: This assumes WebTransport support only + if params.has(VarInt::from_u32(1)) { + return Err(DecodeError::InvalidParameter); + } + + let extensions = Extensions::load(&mut params).await?; + + Ok(Self { + versions, + role, + extensions, + params, + }) + } + + /// Encode a server setup message. + pub async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + VarInt::from_u32(0x40).encode(w).await?; + self.versions.encode(w).await?; + + let mut params = self.params.clone(); + params.set(VarInt::from_u32(0), self.role).await?; + self.extensions.store(&mut params).await?; + + params.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/setup/extension.rs b/moq-transport/src/setup/extension.rs new file mode 100644 index 0000000..9e8c8cc --- /dev/null +++ b/moq-transport/src/setup/extension.rs @@ -0,0 +1,84 @@ +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params}; +use crate::session::SessionError; +use crate::VarInt; +use paste::paste; + +/// This is a custom extension scheme to allow/require draft PRs. +/// +/// By convention, the extension number is the PR number + 0xe0000. + +macro_rules! 
extensions { + {$($name:ident = $val:expr,)*} => { + #[derive(Clone, Default, Debug)] + pub struct Extensions { + $( + pub $name: bool, + )* + } + + impl Extensions { + pub async fn load(params: &mut Params) -> Result { + let mut extensions = Self::default(); + + $( + if let Some(_) = params.get::(VarInt::from_u32($val)).await? { + extensions.$name = true + } + )* + + Ok(extensions) + } + + pub async fn store(&self, params: &mut Params) -> Result<(), EncodeError> { + $( + if self.$name { + params.set(VarInt::from_u32($val), ExtensionExists{}).await?; + } + )* + + Ok(()) + } + + paste! { + $( + pub fn [](&self) -> Result<(), SessionError> { + match self.$name { + true => Ok(()), + false => Err(SessionError::RequiredExtension(VarInt::from_u32($val))), + } + } + )* + } + } + } +} + +struct ExtensionExists; + +#[async_trait::async_trait] +impl Decode for ExtensionExists { + async fn decode(_r: &mut R) -> Result { + Ok(ExtensionExists {}) + } +} + +#[async_trait::async_trait] +impl Encode for ExtensionExists { + async fn encode(&self, _w: &mut W) -> Result<(), EncodeError> { + Ok(()) + } +} + +extensions! { + // required for publishers: OBJECT contains expires VarInt in seconds: https://github.com/moq-wg/moq-transport/issues/249 + // TODO write up a PR + object_expires = 0xe00f9, + + // required: SUBSCRIBE chooses track ID: https://github.com/moq-wg/moq-transport/pull/258 + subscriber_id = 0xe0102, + + // optional: SUBSCRIBE contains namespace/name tuple: https://github.com/moq-wg/moq-transport/pull/277 + subscribe_split = 0xe0115, +} diff --git a/moq-transport/src/setup/mod.rs b/moq-transport/src/setup/mod.rs new file mode 100644 index 0000000..e7662e7 --- /dev/null +++ b/moq-transport/src/setup/mod.rs @@ -0,0 +1,17 @@ +//! Messages used for the MoQ Transport handshake. +//! +//! After establishing the WebTransport session, the client creates a bidirectional QUIC stream. +//! The client sends the [Client] message and the server responds with the [Server] message. +//! 
Both sides negotate the [Version] and [Role]. + +mod client; +mod extension; +mod role; +mod server; +mod version; + +pub use client::*; +pub use extension::*; +pub use role::*; +pub use server::*; +pub use version::*; diff --git a/moq-transport/src/setup/role.rs b/moq-transport/src/setup/role.rs new file mode 100644 index 0000000..10b30e0 --- /dev/null +++ b/moq-transport/src/setup/role.rs @@ -0,0 +1,74 @@ +use crate::coding::{AsyncRead, AsyncWrite}; + +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +/// Indicates the endpoint is a publisher, subscriber, or both. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Role { + Publisher, + Subscriber, + Both, +} + +impl Role { + /// Returns true if the role is publisher. + pub fn is_publisher(&self) -> bool { + match self { + Self::Publisher | Self::Both => true, + Self::Subscriber => false, + } + } + + /// Returns true if the role is a subscriber. + pub fn is_subscriber(&self) -> bool { + match self { + Self::Subscriber | Self::Both => true, + Self::Publisher => false, + } + } + + /// Returns true if two endpoints are compatible. + pub fn is_compatible(&self, other: Role) -> bool { + self.is_publisher() == other.is_subscriber() && self.is_subscriber() == other.is_publisher() + } +} + +impl From for VarInt { + fn from(r: Role) -> Self { + VarInt::from_u32(match r { + Role::Publisher => 0x1, + Role::Subscriber => 0x2, + Role::Both => 0x3, + }) + } +} + +impl TryFrom for Role { + type Error = DecodeError; + + fn try_from(v: VarInt) -> Result { + match v.into_inner() { + 0x1 => Ok(Self::Publisher), + 0x2 => Ok(Self::Subscriber), + 0x3 => Ok(Self::Both), + _ => Err(DecodeError::InvalidRole(v)), + } + } +} + +#[async_trait::async_trait] +impl Decode for Role { + /// Decode the role. + async fn decode(r: &mut R) -> Result { + let v = VarInt::decode(r).await?; + v.try_into() + } +} + +#[async_trait::async_trait] +impl Encode for Role { + /// Encode the role. 
+ async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + VarInt::from(*self).encode(w).await + } +} diff --git a/moq-transport/src/setup/server.rs b/moq-transport/src/setup/server.rs new file mode 100644 index 0000000..7f73119 --- /dev/null +++ b/moq-transport/src/setup/server.rs @@ -0,0 +1,71 @@ +use super::{Extensions, Role, Version}; +use crate::{ + coding::{Decode, DecodeError, Encode, EncodeError, Params}, + VarInt, +}; + +use crate::coding::{AsyncRead, AsyncWrite}; + +/// Sent by the server in response to a client setup. +// NOTE: This is not a message type, but rather the control stream header. +// Proposal: https://github.com/moq-wg/moq-transport/issues/138 +#[derive(Debug)] +pub struct Server { + /// The list of supported versions in preferred order. + pub version: Version, + + /// Indicate if the server is a publisher, a subscriber, or both. + // Proposal: moq-wg/moq-transport#151 + pub role: Role, + + /// Custom extensions. + pub extensions: Extensions, + + /// Unknown parameters. + pub params: Params, +} + +impl Server { + /// Decode the server setup. + pub async fn decode(r: &mut R) -> Result { + let typ = VarInt::decode(r).await?; + if typ.into_inner() != 0x41 { + return Err(DecodeError::InvalidMessage(typ)); + } + + let version = Version::decode(r).await?; + let mut params = Params::decode(r).await?; + + let role = params + .get::(VarInt::from_u32(0)) + .await? + .ok_or(DecodeError::MissingParameter)?; + + // Make sure the PATH parameter isn't used + if params.has(VarInt::from_u32(1)) { + return Err(DecodeError::InvalidParameter); + } + + let extensions = Extensions::load(&mut params).await?; + + Ok(Self { + version, + role, + extensions, + params, + }) + } + + /// Encode the server setup. 
+ pub async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + VarInt::from_u32(0x41).encode(w).await?; + self.version.encode(w).await?; + + let mut params = self.params.clone(); + params.set(VarInt::from_u32(0), self.role).await?; + self.extensions.store(&mut params).await?; + params.encode(w).await?; + + Ok(()) + } +} diff --git a/moq-transport/src/setup/version.rs b/moq-transport/src/setup/version.rs new file mode 100644 index 0000000..8933039 --- /dev/null +++ b/moq-transport/src/setup/version.rs @@ -0,0 +1,155 @@ +use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; + +use crate::coding::{AsyncRead, AsyncWrite}; + +use std::ops::Deref; + +/// A version number negotiated during the setup. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Version(pub VarInt); + +impl Version { + /// https://www.ietf.org/archive/id/draft-ietf-moq-transport-00.html + pub const DRAFT_00: Version = Version(VarInt::from_u32(0xff000000)); + + /// https://www.ietf.org/archive/id/draft-ietf-moq-transport-01.html + pub const DRAFT_01: Version = Version(VarInt::from_u32(0xff000001)); + + /// Fork of draft-ietf-moq-transport-00. + /// + /// Rough list of differences: + /// + /// # Messages + /// - Messages are sent over a control stream or a data stream. + /// - Data streams: each unidirectional stream contains a single OBJECT message. + /// - Control stream: a (client-initiated) bidirectional stream containing SETUP and then all other messages. + /// - Messages do not contain a length; unknown messages are fatal. + /// + /// # SETUP + /// - SETUP is split into SETUP_CLIENT and SETUP_SERVER with separate IDs. + /// - SETUP uses version `0xff00` for draft-00. + /// - SETUP no longer contains optional parameters; all are encoded in order and possibly zero. + /// - SETUP `role` indicates the role of the sender, not the role of the server. + /// - SETUP `path` field removed; use WebTransport for path. 
+ /// + /// # SUBSCRIBE + /// - SUBSCRIBE `full_name` is split into separate `namespace` and `name` fields. + /// - SUBSCRIBE no longer contains optional parameters; all are encoded in order and possibly zero. + /// - SUBSCRIBE no longer contains the `auth` parameter; use WebTransport for auth. + /// - SUBSCRIBE no longer contains the `group` parameter; concept no longer exists. + /// - SUBSCRIBE contains the `id` instead of SUBSCRIBE_OK. + /// - SUBSCRIBE_OK and SUBSCRIBE_ERROR reference the subscription `id` the instead of the track `full_name`. + /// - SUBSCRIBE_ERROR was renamed to SUBSCRIBE_RESET, sent by publisher to terminate a SUBSCRIBE. + /// - SUBSCRIBE_STOP was added, sent by the subscriber to terminate a SUBSCRIBE. + /// - SUBSCRIBE_OK no longer has `expires`. + /// + /// # ANNOUNCE + /// - ANNOUNCE no longer contains optional parameters; all are encoded in order and possibly zero. + /// - ANNOUNCE no longer contains the `auth` field; use WebTransport for auth. + /// - ANNOUNCE_ERROR was renamed to ANNOUNCE_RESET, sent by publisher to terminate an ANNOUNCE. + /// - ANNOUNCE_STOP was added, sent by the subscriber to terminate an ANNOUNCE. + /// + /// # OBJECT + /// - OBJECT uses a dedicated QUIC stream. + /// - OBJECT has no size and continues until stream FIN. + /// - OBJECT `priority` is a i32 instead of a varint. (for practical reasons) + /// - OBJECT `expires` was added, a varint in seconds. + /// - OBJECT `group` was removed. + /// + /// # GROUP + /// - GROUP concept was removed, replaced with OBJECT as a QUIC stream. + pub const KIXEL_00: Version = Version(VarInt::from_u32(0xbad00)); + + /// Fork of draft-ietf-moq-transport-01. + /// + /// Most of the KIXEL_00 changes made it into the draft, or were reverted. + /// This was only used for a short time until extensions were created. + /// + /// - SUBSCRIBE contains a separate track namespace and track name field (accidental revert). 
[#277](https://github.com/moq-wg/moq-transport/pull/277) + /// - SUBSCRIBE contains the `track_id` instead of SUBSCRIBE_OK. [#145](https://github.com/moq-wg/moq-transport/issues/145) + /// - SUBSCRIBE_* reference `track_id` the instead of the `track_full_name`. [#145](https://github.com/moq-wg/moq-transport/issues/145) + /// - OBJECT `priority` is still a VarInt, but the max value is a u32 (implementation reasons) + /// - OBJECT messages within the same `group` MUST be on the same QUIC stream. + pub const KIXEL_01: Version = Version(VarInt::from_u32(0xbad01)); +} + +impl From for Version { + fn from(v: VarInt) -> Self { + Self(v) + } +} + +impl From for VarInt { + fn from(v: Version) -> Self { + v.0 + } +} + +impl Version { + /// Decode the version number. + pub async fn decode(r: &mut R) -> Result { + let v = VarInt::decode(r).await?; + Ok(Self(v)) + } + + /// Encode the version number. + pub async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + self.0.encode(w).await?; + Ok(()) + } +} + +/// A list of versions in arbitrary order. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Versions(Vec); + +#[async_trait::async_trait] +impl Decode for Versions { + /// Decode the version list. + async fn decode(r: &mut R) -> Result { + let count = VarInt::decode(r).await?.into_inner(); + let mut vs = Vec::new(); + + for _ in 0..count { + let v = Version::decode(r).await?; + vs.push(v); + } + + Ok(Self(vs)) + } +} + +#[async_trait::async_trait] +impl Encode for Versions { + /// Encode the version list. 
+ async fn encode(&self, w: &mut W) -> Result<(), EncodeError> { + let size: VarInt = self.0.len().try_into()?; + size.encode(w).await?; + + for v in &self.0 { + v.encode(w).await?; + } + + Ok(()) + } +} + +impl Deref for Versions { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From> for Versions { + fn from(vs: Vec) -> Self { + Self(vs) + } +} + +impl From<[Version; N]> for Versions { + fn from(vs: [Version; N]) -> Self { + Self(vs.to_vec()) + } +} diff --git a/src/README.md b/src/README.md deleted file mode 100644 index f786e56..0000000 --- a/src/README.md +++ /dev/null @@ -1,23 +0,0 @@ -To be used as follows: - -```rust -// Create the parsebin element -let parsebin = gst::ElementFactory::make("parsebin", None).expect("Failed to create parsebin element"); - -// Create the h264parse element -let h264parse = gst::ElementFactory::make("h264parse", None).expect("Failed to create h264parse element"); - -// Create the mp4mux element for fmp4 -let mp4mux = gst::ElementFactory::make("mp4mux", None).expect("Failed to create mp4mux element"); -mp4mux.set_property("fragment-duration", &2000u32).unwrap(); // Fragment duration in milliseconds -mp4mux.set_property_from_str("streamable", "true"); // For streaming output - -// Add elements to the pipeline -pipeline.add_many(&[&waylandsrc, &parsebin, &h264parse, &mp4mux, &moqsink])?; - -// Link the elements together -waylandsrc.link(&parsebin)?; -parsebin.link(&h264parse)?; -h264parse.link(&mp4mux)?; -mp4mux.link(&moqsink)?; -``` \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 647b8aa..0000000 --- a/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -#![allow(clippy::non_send_fields_in_send_ty, unused_doc_comments)] - -use gst::glib; - -mod warpsink; - -fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { - warpsink::register(plugin)?; - Ok(()) -} - -gst::plugin_define!( - warp, - env!("CARGO_PKG_DESCRIPTION"), - plugin_init, - 
concat!(env!("CARGO_PKG_VERSION"), "-", env!("COMMIT_ID")), - "MIT/X11", - env!("CARGO_PKG_NAME"), - env!("CARGO_PKG_NAME"), - env!("CARGO_PKG_REPOSITORY"), - env!("BUILD_REL_DATE") -); diff --git a/wayland-display-core/Cargo.toml b/wayland-display-core/Cargo.toml new file mode 100644 index 0000000..bb736f6 --- /dev/null +++ b/wayland-display-core/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "wayland-display-core" +authors = ["Victoria Brekenfeld ", "Alessandro Beltramo "] +version = "0.1.0" +edition = "2021" +license = "MIT" +description = "Wayland Compositor producing GStreamer buffers" +rust-version = "1.70" + +[lib] +name = "waylanddisplaycore" +crate-type = ["cdylib", "staticlib", "rlib"] +path = "src/lib.rs" + +[dependencies] +gst.workspace = true +gst-video.workspace = true +tracing.workspace = true +once_cell.workspace = true +wayland-backend = "0.1.0" +wayland-scanner = "0.30.0" + +[dependencies.smithay] +git = "https://github.com/smithay/Smithay" +rev = "b1c682742a" +default-features = false +features = [ + "backend_drm", + "backend_egl", + "backend_libinput", + "backend_udev", + "renderer_gl", + "use_system_lib", + "desktop", + "wayland_frontend" +] \ No newline at end of file diff --git a/wayland-display-core/LICENSE b/wayland-display-core/LICENSE new file mode 100644 index 0000000..08a0ba5 --- /dev/null +++ b/wayland-display-core/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Games on Whales https://github.com/games-on-whales/ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in 
all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/wayland-display-core/README.md b/wayland-display-core/README.md new file mode 100644 index 0000000..3a9b5f5 --- /dev/null +++ b/wayland-display-core/README.md @@ -0,0 +1,70 @@ +# gst-wayland-display + +A micro Wayland compositor that can be used as a Gstreamer plugin + +## Install + +see [cargo-c](https://github.com/lu-zero/cargo-c) + +```bash +git clone ... +cd gst-wayland-display +cargo cinstall --prefix=/usr/local +``` + +## GStreamer plugin + +TODO + +## C Bindings + +CmakeLists.txt + +```cmake +pkg_check_modules(libgstwaylanddisplay REQUIRED IMPORTED_TARGET libgstwaylanddisplay) +target_link_libraries( PUBLIC PkgConfig::libgstwaylanddisplay) +``` + +Include in your code: + +```c +#include +``` + +Example usage: + +```c++ +auto w_state = display_init("/dev/dri/renderD128"); // Pass a render node + +display_add_input_device(w_state, "/dev/input/event20"); // Mouse +display_add_input_device(w_state, "/dev/input/event21"); // Keyboard + +// Setting video as 1920x1080@60 +auto video_info = gst_caps_new_simple("video/x-raw", + "width", G_TYPE_INT, 1920, + "height", G_TYPE_INT, 1080, + "framerate", GST_TYPE_FRACTION, 60, 1, + "format", G_TYPE_STRING, "RGBx", + NULL); +display_set_video_info(w_state, video_info); + +// Get a list of the devices needed, ex: ["/dev/dri/renderD128", "/dev/dri/card0"] +auto n_devices = display_get_devices_len(w_state); +const char *devs[n_devices]; +display_get_devices(w_state, devs, n_devices); + +// Get a list 
of the env vars needed, notably the wayland socket +// ex: ["WAYLAND_DISPLAY=wayland-1"] +auto n_envs = display_get_envvars_len(w_state); +const char *envs[n_envs]; +display_get_envvars(w_state, envs, n_envs); + +// Example of polling for new video data +GstBuffer * v_buffer; +while(true){ + v_buffer = display_get_frame(w_state); + // TODO: do something with the video data +} + +display_finish(w_state); // Cleanup +``` \ No newline at end of file diff --git a/wayland-display-core/resources/cursor.rgba b/wayland-display-core/resources/cursor.rgba new file mode 100644 index 0000000..729c1cc Binary files /dev/null and b/wayland-display-core/resources/cursor.rgba differ diff --git a/wayland-display-core/resources/protocols/wayland-drm.xml b/wayland-display-core/resources/protocols/wayland-drm.xml new file mode 100644 index 0000000..eaf2654 --- /dev/null +++ b/wayland-display-core/resources/protocols/wayland-drm.xml @@ -0,0 +1,189 @@ + + + + + Copyright © 2008-2011 Kristian Høgsberg + Copyright © 2010-2011 Intel Corporation + + Permission to use, copy, modify, distribute, and sell this + software and its documentation for any purpose is hereby granted + without fee, provided that\n the above copyright notice appear in + all copies and that both that copyright notice and this permission + notice appear in supporting documentation, and that the name of + the copyright holders not be used in advertising or publicity + pertaining to distribution of the software without specific, + written prior permission. The copyright holders make no + representations about the suitability of this software for any + purpose. It is provided "as is" without express or implied + warranty. 
+ + THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS + SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY + SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, + ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF + THIS SOFTWARE. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Bitmask of capabilities. + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/wayland-display-core/src/comp/focus.rs b/wayland-display-core/src/comp/focus.rs new file mode 100644 index 0000000..01d8039 --- /dev/null +++ b/wayland-display-core/src/comp/focus.rs @@ -0,0 +1,156 @@ +use smithay::{ + backend::input::KeyState, + desktop::{PopupKind, Window}, + input::{ + keyboard::{KeyboardTarget, KeysymHandle, ModifiersState}, + pointer::{AxisFrame, ButtonEvent, MotionEvent, PointerTarget, RelativeMotionEvent}, + Seat, + }, + reexports::wayland_server::{backend::ObjectId, protocol::wl_surface::WlSurface}, + utils::{IsAlive, Serial}, + wayland::seat::WaylandFocus, +}; + +#[derive(Debug, Clone, PartialEq)] +pub enum FocusTarget { + Wayland(Window), + Popup(PopupKind), +} + +impl IsAlive for FocusTarget { + fn alive(&self) -> bool { + match self { + FocusTarget::Wayland(w) => w.alive(), + FocusTarget::Popup(p) => p.alive(), + } + } +} + +impl From for FocusTarget { + fn from(w: Window) -> Self { + FocusTarget::Wayland(w) + } +} + +impl From for FocusTarget { + fn from(p: PopupKind) -> Self { + FocusTarget::Popup(p) + } +} + +impl KeyboardTarget for FocusTarget { + fn enter( + &self, + seat: &Seat, + data: &mut super::State, + keys: Vec>, + serial: 
Serial, + ) { + match self { + FocusTarget::Wayland(w) => KeyboardTarget::enter(w, seat, data, keys, serial), + FocusTarget::Popup(p) => { + KeyboardTarget::enter(p.wl_surface(), seat, data, keys, serial) + } + } + } + + fn leave(&self, seat: &Seat, data: &mut super::State, serial: Serial) { + match self { + FocusTarget::Wayland(w) => KeyboardTarget::leave(w, seat, data, serial), + FocusTarget::Popup(p) => KeyboardTarget::leave(p.wl_surface(), seat, data, serial), + } + } + + fn key( + &self, + seat: &Seat, + data: &mut super::State, + key: KeysymHandle<'_>, + state: KeyState, + serial: Serial, + time: u32, + ) { + match self { + FocusTarget::Wayland(w) => w.key(seat, data, key, state, serial, time), + FocusTarget::Popup(p) => p.wl_surface().key(seat, data, key, state, serial, time), + } + } + + fn modifiers( + &self, + seat: &Seat, + data: &mut super::State, + modifiers: ModifiersState, + serial: Serial, + ) { + match self { + FocusTarget::Wayland(w) => w.modifiers(seat, data, modifiers, serial), + FocusTarget::Popup(p) => p.wl_surface().modifiers(seat, data, modifiers, serial), + } + } +} + +impl PointerTarget for FocusTarget { + fn enter(&self, seat: &Seat, data: &mut super::State, event: &MotionEvent) { + match self { + FocusTarget::Wayland(w) => PointerTarget::enter(w, seat, data, event), + FocusTarget::Popup(p) => PointerTarget::enter(p.wl_surface(), seat, data, event), + } + } + + fn motion(&self, seat: &Seat, data: &mut super::State, event: &MotionEvent) { + match self { + FocusTarget::Wayland(w) => w.motion(seat, data, event), + FocusTarget::Popup(p) => p.wl_surface().motion(seat, data, event), + } + } + + fn relative_motion( + &self, + seat: &Seat, + data: &mut super::State, + event: &RelativeMotionEvent, + ) { + match self { + FocusTarget::Wayland(w) => w.relative_motion(seat, data, event), + FocusTarget::Popup(p) => p.wl_surface().relative_motion(seat, data, event), + } + } + + fn button(&self, seat: &Seat, data: &mut super::State, event: &ButtonEvent) 
{ + match self { + FocusTarget::Wayland(w) => w.button(seat, data, event), + FocusTarget::Popup(p) => p.wl_surface().button(seat, data, event), + } + } + + fn axis(&self, seat: &Seat, data: &mut super::State, frame: AxisFrame) { + match self { + FocusTarget::Wayland(w) => w.axis(seat, data, frame), + FocusTarget::Popup(p) => p.wl_surface().axis(seat, data, frame), + } + } + + fn leave(&self, seat: &Seat, data: &mut super::State, serial: Serial, time: u32) { + match self { + FocusTarget::Wayland(w) => PointerTarget::leave(w, seat, data, serial, time), + FocusTarget::Popup(p) => PointerTarget::leave(p.wl_surface(), seat, data, serial, time), + } + } +} + +impl WaylandFocus for FocusTarget { + fn wl_surface(&self) -> Option { + match self { + FocusTarget::Wayland(w) => w.wl_surface(), + FocusTarget::Popup(p) => Some(p.wl_surface().clone()), + } + } + + fn same_client_as(&self, object_id: &ObjectId) -> bool { + match self { + FocusTarget::Wayland(w) => w.same_client_as(object_id), + FocusTarget::Popup(p) => p.wl_surface().same_client_as(object_id), + } + } +} diff --git a/wayland-display-core/src/comp/input.rs b/wayland-display-core/src/comp/input.rs new file mode 100644 index 0000000..03ab460 --- /dev/null +++ b/wayland-display-core/src/comp/input.rs @@ -0,0 +1,264 @@ +use super::{focus::FocusTarget, State}; +use smithay::{ + backend::{ + input::{ + Axis, AxisSource, Event, InputEvent, KeyState, KeyboardKeyEvent, PointerAxisEvent, + PointerButtonEvent, PointerMotionEvent, + }, + libinput::LibinputInputBackend, + }, + input::{ + keyboard::{keysyms, FilterResult}, + pointer::{AxisFrame, ButtonEvent, MotionEvent, RelativeMotionEvent}, + }, + reexports::{ + input::LibinputInterface, + nix::{fcntl, fcntl::OFlag, sys::stat}, + wayland_server::protocol::wl_pointer, + }, + utils::{Logical, Point, Serial, SERIAL_COUNTER}, +}; +use std::{ + os::{fd::FromRawFd, unix::io::OwnedFd}, + path::Path, + time::Instant, +}; + +pub struct NixInterface; + +impl LibinputInterface for 
NixInterface { + fn open_restricted(&mut self, path: &Path, flags: i32) -> Result { + fcntl::open(path, OFlag::from_bits_truncate(flags), stat::Mode::empty()) + .map(|fd| unsafe { OwnedFd::from_raw_fd(fd) }) + .map_err(|err| err as i32) + } + fn close_restricted(&mut self, fd: OwnedFd) { + let _ = fd; + } +} + +impl State { + pub fn process_input_event(&mut self, event: InputEvent) { + match event { + InputEvent::Keyboard { event, .. } => { + let keycode = event.key_code(); + let state = event.state(); + let serial = SERIAL_COUNTER.next_serial(); + let time = event.time_msec(); + let keyboard = self.seat.get_keyboard().unwrap(); + + keyboard.input::<(), _>( + self, + keycode, + state, + serial, + time, + |data, modifiers, handle| { + if state == KeyState::Pressed { + if modifiers.ctrl + && modifiers.shift + && !modifiers.alt + && !modifiers.logo + { + match handle.modified_sym() { + keysyms::KEY_Tab => { + if let Some(element) = data.space.elements().last().cloned() + { + data.surpressed_keys.insert(keysyms::KEY_Tab); + let location = + data.space.element_location(&element).unwrap(); + data.space.map_element(element.clone(), location, true); + data.seat.get_keyboard().unwrap().set_focus( + data, + Some(FocusTarget::from(element)), + serial, + ); + return FilterResult::Intercept(()); + } + } + keysyms::KEY_Q => { + if let Some(target) = + data.seat.get_keyboard().unwrap().current_focus() + { + match target { + FocusTarget::Wayland(window) => { + window.toplevel().send_close(); + } + _ => return FilterResult::Forward, + }; + data.surpressed_keys.insert(keysyms::KEY_Q); + return FilterResult::Intercept(()); + } + } + _ => {} + } + } + } else { + if data.surpressed_keys.remove(&handle.modified_sym()) { + return FilterResult::Intercept(()); + } + } + + FilterResult::Forward + }, + ); + } + InputEvent::PointerMotion { event, .. 
} => { + self.last_pointer_movement = Instant::now(); + let serial = SERIAL_COUNTER.next_serial(); + let delta = event.delta(); + self.pointer_location += delta; + self.pointer_location = self.clamp_coords(self.pointer_location); + + let pointer = self.seat.get_pointer().unwrap(); + let under = self + .space + .element_under(self.pointer_location) + .map(|(w, pos)| (w.clone().into(), pos)); + pointer.motion( + self, + under.clone(), + &MotionEvent { + location: self.pointer_location, + serial, + time: event.time_msec(), + }, + ); + pointer.relative_motion( + self, + under, + &RelativeMotionEvent { + delta, + delta_unaccel: event.delta_unaccel(), + utime: event.time(), + }, + ) + } + InputEvent::PointerMotionAbsolute { event } => { + self.last_pointer_movement = Instant::now(); + let serial = SERIAL_COUNTER.next_serial(); + if let Some(output) = self.output.as_ref() { + let output_size = output + .current_mode() + .unwrap() + .size + .to_f64() + .to_logical(output.current_scale().fractional_scale()) + .to_i32_round(); + self.pointer_location = ( + event.absolute_x_transformed(output_size.w), + event.absolute_y_transformed(output_size.h), + ) + .into(); + + let pointer = self.seat.get_pointer().unwrap(); + let under = self + .space + .element_under(self.pointer_location) + .map(|(w, pos)| (w.clone().into(), pos)); + pointer.motion( + self, + under.clone(), + &MotionEvent { + location: self.pointer_location, + serial, + time: event.time_msec(), + }, + ); + } + } + InputEvent::PointerButton { event, .. 
} => { + self.last_pointer_movement = Instant::now(); + let serial = SERIAL_COUNTER.next_serial(); + let button = event.button_code(); + + let state = wl_pointer::ButtonState::from(event.state()); + if wl_pointer::ButtonState::Pressed == state { + self.update_keyboard_focus(serial); + }; + self.seat.get_pointer().unwrap().button( + self, + &ButtonEvent { + button, + state: state.try_into().unwrap(), + serial, + time: event.time_msec(), + }, + ); + } + InputEvent::PointerAxis { event, .. } => { + self.last_pointer_movement = Instant::now(); + let horizontal_amount = event + .amount(Axis::Horizontal) + .or_else(|| event.amount_discrete(Axis::Horizontal).map(|x| x * 2.0)) + .unwrap_or(0.0); + let vertical_amount = event + .amount(Axis::Vertical) + .or_else(|| event.amount_discrete(Axis::Vertical).map(|y| y * 2.0)) + .unwrap_or(0.0); + let horizontal_amount_discrete = event.amount_discrete(Axis::Horizontal); + let vertical_amount_discrete = event.amount_discrete(Axis::Vertical); + + { + let mut frame = AxisFrame::new(event.time_msec()).source(event.source()); + if horizontal_amount != 0.0 { + frame = frame.value(Axis::Horizontal, horizontal_amount); + if let Some(discrete) = horizontal_amount_discrete { + frame = frame.discrete(Axis::Horizontal, discrete as i32); + } + } else if event.source() == AxisSource::Finger { + frame = frame.stop(Axis::Horizontal); + } + if vertical_amount != 0.0 { + frame = frame.value(Axis::Vertical, vertical_amount); + if let Some(discrete) = vertical_amount_discrete { + frame = frame.discrete(Axis::Vertical, discrete as i32); + } + } else if event.source() == AxisSource::Finger { + frame = frame.stop(Axis::Vertical); + } + self.seat.get_pointer().unwrap().axis(self, frame); + } + } + _ => {} + } + } + + fn clamp_coords(&self, pos: Point) -> Point { + if let Some(output) = self.output.as_ref() { + if let Some(mode) = output.current_mode() { + return ( + pos.x.max(0.0).min(mode.size.w as f64), + pos.y.max(0.0).min(mode.size.h as f64), + ) + 
.into(); + } + } + pos + } + + fn update_keyboard_focus(&mut self, serial: Serial) { + let pointer = self.seat.get_pointer().unwrap(); + let keyboard = self.seat.get_keyboard().unwrap(); + // change the keyboard focus unless the pointer or keyboard is grabbed + // We test for any matching surface type here but always use the root + // (in case of a window the toplevel) surface for the focus. + // So for example if a user clicks on a subsurface or popup the toplevel + // will receive the keyboard focus. Directly assigning the focus to the + // matching surface leads to issues with clients dismissing popups and + // subsurface menus (for example firefox-wayland). + // see here for a discussion about that issue: + // https://gitlab.freedesktop.org/wayland/wayland/-/issues/294 + if !pointer.is_grabbed() && !keyboard.is_grabbed() { + if let Some((window, _)) = self + .space + .element_under(self.pointer_location) + .map(|(w, p)| (w.clone(), p)) + { + self.space.raise_element(&window, true); + keyboard.set_focus(self, Some(FocusTarget::from(window)), serial); + return; + } + } + } +} diff --git a/wayland-display-core/src/comp/mod.rs b/wayland-display-core/src/comp/mod.rs new file mode 100644 index 0000000..602f0fe --- /dev/null +++ b/wayland-display-core/src/comp/mod.rs @@ -0,0 +1,521 @@ +use std::{ + collections::{HashMap, HashSet}, + ffi::CString, + os::unix::prelude::AsRawFd, + sync::{mpsc::Sender, Arc, Mutex, Weak}, + time::{Duration, Instant}, +}; + +use super::Command; +use gst_video::VideoInfo; +use once_cell::sync::Lazy; +use smithay::{ + backend::{ + allocator::dmabuf::Dmabuf, + drm::{DrmNode, NodeType}, + egl::{EGLContext, EGLDevice, EGLDisplay}, + libinput::LibinputInputBackend, + renderer::{ + damage::{DamageTrackedRenderer, DamageTrackedRendererError as DTRError}, + element::memory::MemoryRenderBuffer, + gles2::{Gles2Renderbuffer, Gles2Renderer}, + Bind, Offscreen, + }, + }, + desktop::{ + utils::{ + send_frames_surface_tree, 
surface_presentation_feedback_flags_from_states, + surface_primary_scanout_output, update_surface_primary_scanout_output, + OutputPresentationFeedback, + }, + PopupManager, Space, Window, + }, + input::{keyboard::XkbConfig, pointer::CursorImageStatus, Seat, SeatState}, + output::{Mode as OutputMode, Output, PhysicalProperties, Subpixel}, + reexports::{ + calloop::{ + channel::{Channel, Event}, + generic::Generic, + timer::{TimeoutAction, Timer}, + EventLoop, Interest, LoopHandle, Mode, PostAction, + }, + input::Libinput, + wayland_protocols::wp::presentation_time::server::wp_presentation_feedback, + wayland_server::{ + backend::{ClientData, ClientId, DisconnectReason}, + Display, DisplayHandle, + }, + }, + utils::{Clock, Logical, Monotonic, Physical, Point, Rectangle, Size, Transform}, + wayland::{ + compositor::{with_states, CompositorState}, + data_device::DataDeviceState, + dmabuf::{DmabufGlobal, DmabufState}, + output::OutputManagerState, + presentation::PresentationState, + shell::xdg::{XdgShellState, XdgToplevelSurfaceData}, + shm::ShmState, + socket::ListeningSocketSource, + viewporter::ViewporterState, + relative_pointer::RelativePointerManagerState, + }, +}; +use wayland_backend::server::GlobalId; + +mod focus; +mod input; +mod rendering; + +pub use self::focus::*; +pub use self::input::*; +pub use self::rendering::*; +use crate::{utils::RenderTarget, wayland::protocols::wl_drm::create_drm_global}; + +static EGL_DISPLAYS: Lazy, Weak>>> = + Lazy::new(|| Mutex::new(HashMap::new())); + +struct ClientState; +impl ClientData for ClientState { + fn initialized(&self, _client_id: ClientId) {} + fn disconnected(&self, _client_id: ClientId, _reason: DisconnectReason) {} +} + +pub(crate) struct Data { + pub(crate) display: Display, + pub(crate) state: State, +} + +#[allow(dead_code)] +pub(crate) struct State { + handle: LoopHandle<'static, Data>, + should_quit: bool, + clock: Clock, + + // render + dtr: Option, + renderbuffer: Option, + pub renderer: Gles2Renderer, 
+ egl_display_ref: Arc, + dmabuf_global: Option<(DmabufGlobal, GlobalId)>, + last_render: Option, + + // management + pub output: Option, + pub video_info: Option, + pub seat: Seat, + pub space: Space, + pub popups: PopupManager, + pointer_location: Point, + last_pointer_movement: Instant, + cursor_element: MemoryRenderBuffer, + pub cursor_state: CursorImageStatus, + surpressed_keys: HashSet, + pub pending_windows: Vec, + input_context: Libinput, + + // wayland state + pub dh: DisplayHandle, + pub compositor_state: CompositorState, + pub data_device_state: DataDeviceState, + pub dmabuf_state: DmabufState, + output_state: OutputManagerState, + presentation_state: PresentationState, + relative_ptr_state: RelativePointerManagerState, + pub seat_state: SeatState, + pub shell_state: XdgShellState, + pub shm_state: ShmState, + viewporter_state: ViewporterState, +} + +pub fn get_egl_device_for_node(drm_node: &DrmNode) -> EGLDevice { + let drm_node = drm_node + .node_with_type(NodeType::Render) + .and_then(Result::ok) + .unwrap_or(drm_node.clone()); + EGLDevice::enumerate() + .expect("Failed to enumerate EGLDevices") + .find(|d| d.try_get_render_node().unwrap_or_default() == Some(drm_node)) + .expect("Unable to find EGLDevice for drm-node") +} + +pub(crate) fn init( + command_src: Channel, + render: impl Into, + devices_tx: Sender>, + envs_tx: Sender>, +) { + let clock = Clock::new().expect("Failed to initialize clock"); + let mut display = Display::::new().unwrap(); + let dh = display.handle(); + + // init state + let compositor_state = CompositorState::new::(&dh); + let data_device_state = DataDeviceState::new::(&dh); + let mut dmabuf_state = DmabufState::new(); + let output_state = OutputManagerState::new_with_xdg_output::(&dh); + let presentation_state = PresentationState::new::(&dh, clock.id() as _); + let relative_ptr_state = RelativePointerManagerState::new::(&dh); + let mut seat_state = SeatState::new(); + let shell_state = XdgShellState::new::(&dh); + let 
viewporter_state = ViewporterState::new::(&dh); + + let render_target = render.into(); + let render_node: Option = render_target.clone().into(); + + // init render backend + let (egl_display_ref, context) = { + let mut displays = EGL_DISPLAYS.lock().unwrap(); + let maybe_display = displays + .get(&render_node) + .and_then(|weak_display| weak_display.upgrade()); + + let egl = match maybe_display { + Some(display) => display, + None => { + let device = match render_node.as_ref() { + Some(render_node) => get_egl_device_for_node(render_node), + None => EGLDevice::enumerate() + .expect("Failed to enumerate EGLDevices") + .find(|device| { + device + .extensions() + .iter() + .any(|e| e == "EGL_MESA_device_software") + }) + .expect("Failed to find software device"), + }; + let display = + Arc::new(EGLDisplay::new(device).expect("Failed to initialize EGL display")); + displays.insert(render_node, Arc::downgrade(&display)); + display + } + }; + let context = EGLContext::new(&egl).expect("Failed to initialize EGL context"); + (egl, context) + }; + let renderer = unsafe { Gles2Renderer::new(context) }.expect("Failed to initialize renderer"); + let _ = devices_tx.send(render_target.as_devices()); + + let shm_state = ShmState::new::(&dh, vec![]); + let dmabuf_global = if let RenderTarget::Hardware(node) = render_target { + let formats = Bind::::supported_formats(&renderer) + .expect("Failed to query formats") + .into_iter() + .collect::>(); + + // dma buffer + let dmabuf_global = dmabuf_state.create_global::(&dh, formats.clone()); + // wl_drm (mesa protocol, so we don't need EGL_WL_bind_display) + let wl_drm_global = create_drm_global::( + &dh, + node.dev_path().expect("Failed to determine DrmNode path?"), + formats.clone(), + &dmabuf_global, + ); + + Some((dmabuf_global, wl_drm_global)) + } else { + None + }; + + let cursor_element = + MemoryRenderBuffer::from_memory(CURSOR_DATA_BYTES, (64, 64), 1, Transform::Normal, None); + + // init input backend + let libinput_context = 
Libinput::new_from_path(NixInterface); + let input_context = libinput_context.clone(); + let libinput_backend = LibinputInputBackend::new(libinput_context); + + let space = Space::default(); + + let mut seat = seat_state.new_wl_seat(&dh, "seat-0"); + seat.add_keyboard(XkbConfig::default(), 200, 25) + .expect("Failed to add keyboard to seat"); + seat.add_pointer(); + + let mut event_loop = + EventLoop::::try_new_high_precision().expect("Unable to create event_loop"); + let state = State { + handle: event_loop.handle(), + should_quit: false, + clock, + + renderer, + egl_display_ref, + dtr: None, + renderbuffer: None, + dmabuf_global, + video_info: None, + last_render: None, + + space, + popups: PopupManager::default(), + seat, + output: None, + pointer_location: (0., 0.).into(), + last_pointer_movement: Instant::now(), + cursor_element, + cursor_state: CursorImageStatus::Default, + surpressed_keys: HashSet::new(), + pending_windows: Vec::new(), + input_context, + + dh: display.handle(), + compositor_state, + data_device_state, + dmabuf_state, + output_state, + presentation_state, + relative_ptr_state, + seat_state, + shell_state, + shm_state, + viewporter_state, + }; + + // init event loop + event_loop + .handle() + .insert_source(libinput_backend, move |event, _, data| { + data.state.process_input_event(event) + }) + .unwrap(); + + event_loop + .handle() + .insert_source(command_src, move |event, _, data| { + match event { + Event::Msg(Command::VideoInfo(info)) => { + let size: Size = + (info.width() as i32, info.height() as i32).into(); + let framerate = info.fps(); + let duration = Duration::from_secs_f64( + framerate.numer() as f64 / framerate.denom() as f64, + ); + + // init wayland objects + let output = data.state.output.get_or_insert_with(|| { + let output = Output::new( + "HEADLESS-1".into(), + PhysicalProperties { + make: "Virtual".into(), + model: "Wolf".into(), + size: (0, 0).into(), + subpixel: Subpixel::Unknown, + }, + ); + 
output.create_global::(&data.display.handle()); + output + }); + let mode = OutputMode { + size: size.into(), + refresh: (duration.as_secs_f64() * 1000.0).round() as i32, + }; + output.change_current_state(Some(mode), None, None, None); + output.set_preferred(mode); + let dtr = DamageTrackedRenderer::from_output(&output); + + data.state.space.map_output(&output, (0, 0)); + data.state.dtr = Some(dtr); + data.state.pointer_location = (size.w as f64 / 2.0, size.h as f64 / 2.0).into(); + data.state.renderbuffer = Some( + data.state + .renderer + .create_buffer((info.width() as i32, info.height() as i32).into()) + .expect("Failed to create renderbuffer"), + ); + data.state.video_info = Some(info); + + let new_size = size + .to_f64() + .to_logical(output.current_scale().fractional_scale()) + .to_i32_round(); + for window in data.state.space.elements() { + let toplevel = window.toplevel(); + let max_size = Rectangle::from_loc_and_size( + (0, 0), + with_states(toplevel.wl_surface(), |states| { + states + .data_map + .get::() + .map(|attrs| attrs.lock().unwrap().max_size) + }) + .unwrap_or(new_size), + ); + + let new_size = max_size + .intersection(Rectangle::from_loc_and_size((0, 0), new_size)) + .map(|rect| rect.size); + toplevel.with_pending_state(|state| state.size = new_size); + toplevel.send_configure(); + } + } + Event::Msg(Command::InputDevice(path)) => { + tracing::info!(path, "Adding input device."); + data.state.input_context.path_add_device(&path); + } + Event::Msg(Command::Buffer(buffer_sender)) => { + let wait = if let Some(last_render) = data.state.last_render { + let framerate = data.state.video_info.as_ref().unwrap().fps(); + let duration = Duration::from_secs_f64( + framerate.denom() as f64 / framerate.numer() as f64, + ); + let time_passed = Instant::now().duration_since(last_render); + if time_passed < duration { + Some(duration - time_passed) + } else { + None + } + } else { + None + }; + + let render = move |data: &mut Data, now: Instant| { + if let 
Err(_) = match data.state.create_frame() { + Ok((buf, damage, render_element_states)) => { + data.state.last_render = Some(now); + let res = buffer_sender.send(Ok(buf)); + + if let Some(output) = data.state.output.as_ref() { + let mut output_presentation_feedback = + OutputPresentationFeedback::new(output); + for window in data.state.space.elements() { + window.with_surfaces(|surface, states| { + update_surface_primary_scanout_output( + surface, + output, + states, + &render_element_states, + |next_output, _, _, _| next_output, + ); + }); + window.send_frame( + output, + data.state.clock.now(), + Some(Duration::ZERO), + |_, _| Some(output.clone()), + ); + window.take_presentation_feedback( + &mut output_presentation_feedback, + surface_primary_scanout_output, + |surface, _| { + surface_presentation_feedback_flags_from_states( + surface, + &render_element_states, + ) + }, + ); + } + if damage.is_some() { + output_presentation_feedback.presented( + data.state.clock.now(), + output + .current_mode() + .map(|mode| mode.refresh as u32) + .unwrap_or_default(), + 0, + wp_presentation_feedback::Kind::Vsync, + ); + } + if let CursorImageStatus::Surface(wl_surface) = + &data.state.cursor_state + { + send_frames_surface_tree( + wl_surface, + output, + data.state.clock.now(), + None, + |_, _| Some(output.clone()), + ) + } + } + + res + } + Err(err) => { + tracing::error!(?err, "Rendering failed."); + buffer_sender.send(Err(match err { + DTRError::OutputNoMode(_) => unreachable!(), + DTRError::Rendering(err) => err.into(), + })) + } + } { + data.state.should_quit = true; + } + }; + + match wait { + Some(duration) => { + if let Err(err) = data.state.handle.insert_source( + Timer::from_duration(duration), + move |now, _, data| { + render(data, now); + TimeoutAction::Drop + }, + ) { + tracing::error!(?err, "Event loop error."); + data.state.should_quit = true; + }; + } + None => render(data, Instant::now()), + }; + } + Event::Msg(Command::Quit) | Event::Closed => { + 
data.state.should_quit = true; + } + }; + }) + .unwrap(); + + let source = ListeningSocketSource::new_auto().unwrap(); + let socket_name = source.socket_name().to_string_lossy().into_owned(); + tracing::info!(?socket_name, "Listening on wayland socket."); + event_loop + .handle() + .insert_source(source, |client_stream, _, data| { + if let Err(err) = data + .display + .handle() + .insert_client(client_stream, std::sync::Arc::new(ClientState)) + { + tracing::error!(?err, "Error adding wayland client."); + }; + }) + .expect("Failed to init wayland socket source"); + + event_loop + .handle() + .insert_source( + Generic::new( + display.backend().poll_fd().as_raw_fd(), + Interest::READ, + Mode::Level, + ), + |_, _, data| { + data.display.dispatch_clients(&mut data.state).unwrap(); + Ok(PostAction::Continue) + }, + ) + .expect("Failed to init wayland server source"); + + let env_vars = vec![CString::new(format!("WAYLAND_DISPLAY={}", socket_name)).unwrap()]; + if let Err(err) = envs_tx.send(env_vars) { + tracing::warn!(?err, "Failed to post environment to application."); + } + + let mut data = Data { display, state }; + let signal = event_loop.get_signal(); + if let Err(err) = event_loop.run(None, &mut data, |data| { + data.display + .flush_clients() + .expect("Failed to flush clients"); + data.state.space.refresh(); + data.state.popups.cleanup(); + + if data.state.should_quit { + signal.stop(); + } + }) { + tracing::error!(?err, "Event loop broke."); + } +} diff --git a/wayland-display-core/src/comp/rendering.rs b/wayland-display-core/src/comp/rendering.rs new file mode 100644 index 0000000..3813f11 --- /dev/null +++ b/wayland-display-core/src/comp/rendering.rs @@ -0,0 +1,121 @@ +use std::time::{Duration, Instant}; + +use smithay::{ + backend::renderer::{ + damage::DamageTrackedRendererError as DTRError, + element::{ + memory::MemoryRenderBufferRenderElement, surface::WaylandSurfaceRenderElement, + RenderElementStates, + }, + gles2::Gles2Renderer, + Bind, ExportMem, 
ImportAll, ImportMem, Renderer, Unbind, + }, + desktop::space::render_output, + input::pointer::CursorImageStatus, + render_elements, + utils::{Physical, Rectangle}, +}; + +use super::State; + +pub const CURSOR_DATA_BYTES: &[u8] = include_bytes!("../../resources/cursor.rgba"); + +render_elements! { + CursorElement where R: Renderer + ImportAll + ImportMem; + Surface=WaylandSurfaceRenderElement, + Memory=MemoryRenderBufferRenderElement +} + +impl State { + pub fn create_frame( + &mut self, + ) -> Result< + ( + gst::Buffer, + Option>>, + RenderElementStates, + ), + DTRError, + > { + assert!(self.output.is_some()); + assert!(self.dtr.is_some()); + assert!(self.video_info.is_some()); + assert!(self.renderbuffer.is_some()); + + let elements = + if Instant::now().duration_since(self.last_pointer_movement) < Duration::from_secs(5) { + match &self.cursor_state { + CursorImageStatus::Default => vec![CursorElement::Memory( + MemoryRenderBufferRenderElement::from_buffer( + &mut self.renderer, + self.pointer_location.to_physical_precise_round(1), + &self.cursor_element, + None, + None, + None, + ) + .map_err(DTRError::Rendering)?, + )], + CursorImageStatus::Surface(wl_surface) => { + smithay::backend::renderer::element::surface::render_elements_from_surface_tree( + &mut self.renderer, + wl_surface, + self.pointer_location.to_physical_precise_round(1), + 1., + ) + } + CursorImageStatus::Hidden => vec![], + } + } else { + vec![] + }; + + self.renderer + .bind(self.renderbuffer.clone().unwrap()) + .map_err(DTRError::Rendering)?; + let (damage, render_element_states) = render_output( + self.output.as_ref().unwrap(), + &mut self.renderer, + 1, + [&self.space], + &*elements, + self.dtr.as_mut().unwrap(), + [0.0, 0.0, 0.0, 1.0], + )?; + + let mapping = self + .renderer + .copy_framebuffer(Rectangle::from_loc_and_size( + (0, 0), + ( + self.video_info.as_ref().unwrap().width() as i32, + self.video_info.as_ref().unwrap().height() as i32, + ), + )) + .expect("Failed to export 
framebuffer"); + let map = self + .renderer + .map_texture(&mapping) + .expect("Failed to download framebuffer"); + + let buffer = { + let mut buffer = gst::Buffer::with_size(map.len()).expect("failed to create buffer"); + { + let buffer = buffer.get_mut().unwrap(); + + let mut vframe = gst_video::VideoFrameRef::from_buffer_ref_writable( + buffer, + self.video_info.as_ref().unwrap(), + ) + .unwrap(); + let plane_data = vframe.plane_data_mut(0).unwrap(); + plane_data.clone_from_slice(map); + } + + buffer + }; + self.renderer.unbind().map_err(DTRError::Rendering)?; + + Ok((buffer, damage, render_element_states)) + } +} diff --git a/wayland-display-core/src/lib.rs b/wayland-display-core/src/lib.rs new file mode 100644 index 0000000..9da38d7 --- /dev/null +++ b/wayland-display-core/src/lib.rs @@ -0,0 +1,134 @@ +use gst_video::VideoInfo; + +use smithay::backend::drm::CreateDrmNodeError; +use smithay::backend::SwapBuffersError; +use smithay::reexports::calloop::channel::Sender; + +use std::ffi::{CString}; +use std::str::FromStr; +use std::sync::mpsc::{self, Receiver, SyncSender}; +use std::thread::JoinHandle; + +use utils::RenderTarget; + +pub(crate) mod comp; +pub(crate) mod utils; +pub(crate) mod wayland; + +pub(crate) enum Command { + InputDevice(String), + VideoInfo(VideoInfo), + Buffer(SyncSender>), + Quit, +} + +pub struct WaylandDisplay { + thread_handle: Option>, + command_tx: Sender, + + pub devices: MaybeRecv>, + pub envs: MaybeRecv>, +} + +pub enum MaybeRecv { + Rx(Receiver), + Value(T), +} + +impl MaybeRecv { + pub fn get(&mut self) -> &T { + match self { + MaybeRecv::Rx(recv) => { + let value = recv.recv().unwrap(); + *self = MaybeRecv::Value(value.clone()); + self.get() + } + MaybeRecv::Value(val) => val, + } + } +} + +impl WaylandDisplay { + pub fn new(render_node: Option) -> Result { + let (channel_tx, channel_rx) = std::sync::mpsc::sync_channel(0); + let (devices_tx, devices_rx) = std::sync::mpsc::channel(); + let (envs_tx, envs_rx) = 
std::sync::mpsc::channel(); + let render_target = RenderTarget::from_str( + &render_node.unwrap_or_else(|| String::from("/dev/dri/renderD128")), + )?; + + let thread_handle = std::thread::spawn(move || { + if let Err(err) = std::panic::catch_unwind(|| { + // calloops channel is not "UnwindSafe", but the std channel is... *sigh* lets workaround it creatively + let (command_tx, command_src) = smithay::reexports::calloop::channel::channel(); + channel_tx.send(command_tx).unwrap(); + comp::init(command_src, render_target, devices_tx, envs_tx); + }) { + tracing::error!(?err, "Compositor thread panic'ed!"); + } + }); + let command_tx = channel_rx.recv().unwrap(); + + Ok(WaylandDisplay { + thread_handle: Some(thread_handle), + command_tx, + devices: MaybeRecv::Rx(devices_rx), + envs: MaybeRecv::Rx(envs_rx), + }) + } + + pub fn devices(&mut self) -> impl Iterator { + self.devices + .get() + .iter() + .map(|string| string.to_str().unwrap()) + } + + pub fn env_vars(&mut self) -> impl Iterator { + self.envs + .get() + .iter() + .map(|string| string.to_str().unwrap()) + } + + pub fn add_input_device(&self, path: impl Into) { + let _ = self.command_tx.send(Command::InputDevice(path.into())); + } + + pub fn set_video_info(&self, info: VideoInfo) { + let _ = self.command_tx.send(Command::VideoInfo(info)); + } + + pub fn frame(&self) -> Result { + let (buffer_tx, buffer_rx) = mpsc::sync_channel(0); + if let Err(err) = self.command_tx.send(Command::Buffer(buffer_tx)) { + tracing::warn!(?err, "Failed to send buffer command."); + return Err(gst::FlowError::Eos); + } + + match buffer_rx.recv() { + Ok(Ok(buffer)) => Ok(buffer), + Ok(Err(err)) => match err { + SwapBuffersError::AlreadySwapped => unreachable!(), + SwapBuffersError::ContextLost(_) => Err(gst::FlowError::Eos), + SwapBuffersError::TemporaryFailure(_) => Err(gst::FlowError::Error), + }, + Err(err) => { + tracing::warn!(?err, "Failed to recv buffer ack."); + Err(gst::FlowError::Error) + } + } + } +} + +impl Drop for 
WaylandDisplay { + fn drop(&mut self) { + if let Err(err) = self.command_tx.send(Command::Quit) { + tracing::warn!("Failed to send stop command: {}", err); + return; + }; + if self.thread_handle.take().unwrap().join().is_err() { + tracing::warn!("Failed to join compositor thread"); + }; + } +} \ No newline at end of file diff --git a/wayland-display-core/src/utils/mod.rs b/wayland-display-core/src/utils/mod.rs new file mode 100644 index 0000000..ca50d83 --- /dev/null +++ b/wayland-display-core/src/utils/mod.rs @@ -0,0 +1,3 @@ +mod target; + +pub use self::target::*; diff --git a/wayland-display-core/src/utils/target.rs b/wayland-display-core/src/utils/target.rs new file mode 100644 index 0000000..712da2e --- /dev/null +++ b/wayland-display-core/src/utils/target.rs @@ -0,0 +1,88 @@ +use std::{ffi::CString, os::unix::fs::MetadataExt, str::FromStr}; + +use smithay::{ + backend::{ + drm::{CreateDrmNodeError, DrmNode, NodeType}, + udev, + }, + reexports::nix::sys::stat::major, +}; + +#[derive(Debug, Clone, PartialEq)] +pub enum RenderTarget { + Hardware(DrmNode), + Software, +} + +impl FromStr for RenderTarget { + type Err = CreateDrmNodeError; + fn from_str(s: &str) -> Result { + Ok(match s { + "software" => RenderTarget::Software, + path => RenderTarget::Hardware(DrmNode::from_path(path)?), + }) + } +} + +impl Into> for RenderTarget { + fn into(self) -> Option { + match self { + RenderTarget::Hardware(node) => Some(node), + RenderTarget::Software => None, + } + } +} + +impl Into for DrmNode { + fn into(self) -> RenderTarget { + RenderTarget::Hardware(self) + } +} + +#[cfg(target_os = "linux")] +const NVIDIA_MAJOR: u64 = 195; + +// no clue how this number is on BSDs, feel free to contribute + +impl RenderTarget { + pub fn as_devices(&self) -> Vec { + match self { + RenderTarget::Hardware(node) => { + let mut devices = Vec::new(); + if let Some(primary) = node.dev_path_with_type(NodeType::Primary) { + devices.push(primary); + } + if let Some(render) = 
node.dev_path_with_type(NodeType::Render) { + devices.push(render); + } + if udev::driver(node.dev_id()) + .ok() + .flatten() + .map(|s| s.to_str() == Some("nvidia")) + .unwrap_or(false) + { + // no idea how match nvidia device nodes to kms/dri-nodes, so lets map all nvidia-nodes to be sure + for entry in std::fs::read_dir("/dev").expect("Unable to access /dev") { + if let Ok(entry) = entry { + if let Ok(metadata) = entry.metadata() { + if metadata.is_file() && major(metadata.dev()) == NVIDIA_MAJOR { + devices.push(entry.path()); + } + } + } + } + } + + devices + .into_iter() + .flat_map(|path| { + path.to_str() + .map(String::from) + .and_then(|string| CString::new(string).ok()) + }) + .collect() + } + RenderTarget::Software => Vec::new(), + } + } +} diff --git a/wayland-display-core/src/wayland/handlers/compositor.rs b/wayland-display-core/src/wayland/handlers/compositor.rs new file mode 100644 index 0000000..fde5178 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/compositor.rs @@ -0,0 +1,147 @@ +use smithay::{ + backend::renderer::utils::{on_commit_buffer_handler}, + delegate_compositor, + desktop::PopupKind, + reexports::{ + wayland_protocols::xdg::shell::server::xdg_toplevel::State as XdgState, + wayland_server::protocol::{wl_buffer::WlBuffer, wl_surface::WlSurface}, + }, + utils::{Size, SERIAL_COUNTER}, + wayland::{ + buffer::BufferHandler, + compositor::{with_states, CompositorHandler, CompositorState}, + seat::WaylandFocus, + shell::xdg::{XdgPopupSurfaceData, XdgToplevelSurfaceData}, + }, +}; + +use crate::comp::{FocusTarget, State}; + +impl BufferHandler for State { + fn buffer_destroyed(&mut self, _buffer: &WlBuffer) {} +} + +impl CompositorHandler for State { + fn compositor_state(&mut self) -> &mut CompositorState { + &mut self.compositor_state + } + + fn commit(&mut self, surface: &WlSurface) { + on_commit_buffer_handler(surface); + + if let Some(window) = self + .space + .elements() + .find(|w| w.wl_surface().as_ref() == Some(surface)) + 
{ + window.on_commit(); + } + self.popups.commit(surface); + + // send the initial configure if relevant + if let Some(idx) = self + .pending_windows + .iter_mut() + .position(|w| w.wl_surface().as_ref() == Some(surface)) + { + let window = self.pending_windows.swap_remove(idx); + + let toplevel = window.toplevel(); + let (initial_configure_sent, max_size) = with_states(surface, |states| { + let attributes = states.data_map.get::().unwrap(); + let attributes_guard = attributes.lock().unwrap(); + + ( + attributes_guard.initial_configure_sent, + attributes_guard.max_size, + ) + }); + + if self.output.is_none() { + return; + } + + if !initial_configure_sent { + if max_size.w == 0 && max_size.h == 0 { + toplevel.with_pending_state(|state| { + state.size = Some( + self.output + .as_ref() + .unwrap() + .current_mode() + .unwrap() + .size + .to_f64() + .to_logical( + self.output + .as_ref() + .unwrap() + .current_scale() + .fractional_scale(), + ) + .to_i32_round(), + ); + state.states.set(XdgState::Fullscreen); + }); + } + toplevel.with_pending_state(|state| { + state.states.set(XdgState::Activated); + }); + toplevel.send_configure(); + self.pending_windows.push(window); + } else { + let window_size = toplevel.current_state().size.unwrap_or((0, 0).into()); + let output_size: Size = self + .output + .as_ref() + .unwrap() + .current_mode() + .unwrap() + .size + .to_f64() + .to_logical( + self.output + .as_ref() + .unwrap() + .current_scale() + .fractional_scale(), + ) + .to_i32_round(); + let loc = ( + (output_size.w / 2) - (window_size.w / 2), + (output_size.h / 2) - (window_size.h / 2), + ); + self.space.map_element(window.clone(), loc, true); + self.seat.get_keyboard().unwrap().set_focus( + self, + Some(FocusTarget::from(window)), + SERIAL_COUNTER.next_serial(), + ); + } + + return; + } + + if let Some(popup) = self.popups.find_popup(surface) { + let PopupKind::Xdg(ref popup) = popup; + let initial_configure_sent = with_states(surface, |states| { + states + .data_map + 
.get::() + .unwrap() + .lock() + .unwrap() + .initial_configure_sent + }); + if !initial_configure_sent { + // NOTE: This should never fail as the initial configure is always + // allowed. + popup.send_configure().expect("initial configure failed"); + } + + return; + }; + } +} + +delegate_compositor!(State); diff --git a/wayland-display-core/src/wayland/handlers/data_device.rs b/wayland-display-core/src/wayland/handlers/data_device.rs new file mode 100644 index 0000000..96792be --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/data_device.rs @@ -0,0 +1,18 @@ +use smithay::{ + delegate_data_device, + wayland::data_device::{ + ClientDndGrabHandler, DataDeviceHandler, DataDeviceState, ServerDndGrabHandler, + }, +}; + +use crate::comp::State; + +impl ServerDndGrabHandler for State {} +impl ClientDndGrabHandler for State {} +impl DataDeviceHandler for State { + fn data_device_state(&self) -> &DataDeviceState { + &self.data_device_state + } +} + +delegate_data_device!(State); diff --git a/wayland-display-core/src/wayland/handlers/dmabuf.rs b/wayland-display-core/src/wayland/handlers/dmabuf.rs new file mode 100644 index 0000000..bbbeb48 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/dmabuf.rs @@ -0,0 +1,26 @@ +use smithay::{ + backend::{allocator::dmabuf::Dmabuf, renderer::ImportDma}, + delegate_dmabuf, + wayland::dmabuf::{DmabufGlobal, DmabufHandler, DmabufState, ImportError}, +}; + +use crate::comp::State; + +impl DmabufHandler for State { + fn dmabuf_state(&mut self) -> &mut DmabufState { + &mut self.dmabuf_state + } + + fn dmabuf_imported( + &mut self, + _global: &DmabufGlobal, + dmabuf: Dmabuf, + ) -> Result<(), ImportError> { + self.renderer + .import_dmabuf(&dmabuf, None) + .map(|_| ()) + .map_err(|_| ImportError::Failed) + } +} + +delegate_dmabuf!(State); diff --git a/wayland-display-core/src/wayland/handlers/mod.rs b/wayland-display-core/src/wayland/handlers/mod.rs new file mode 100644 index 0000000..eb68baf --- /dev/null +++ 
b/wayland-display-core/src/wayland/handlers/mod.rs @@ -0,0 +1,11 @@ +pub mod compositor; +pub mod data_device; +pub mod dmabuf; +pub mod output; +pub mod presentation; +pub mod relative_pointer; +pub mod seat; +pub mod shm; +pub mod viewporter; +pub mod wl_drm; +pub mod xdg; diff --git a/wayland-display-core/src/wayland/handlers/output.rs b/wayland-display-core/src/wayland/handlers/output.rs new file mode 100644 index 0000000..1fe2479 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/output.rs @@ -0,0 +1,5 @@ +use smithay::delegate_output; + +use crate::comp::State; + +delegate_output!(State); diff --git a/wayland-display-core/src/wayland/handlers/presentation.rs b/wayland-display-core/src/wayland/handlers/presentation.rs new file mode 100644 index 0000000..dfdc129 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/presentation.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-3.0-only + +use crate::comp::State; +use smithay::delegate_presentation; + +delegate_presentation!(State); diff --git a/wayland-display-core/src/wayland/handlers/relative_pointer.rs b/wayland-display-core/src/wayland/handlers/relative_pointer.rs new file mode 100644 index 0000000..1d1bd5c --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/relative_pointer.rs @@ -0,0 +1,5 @@ +use smithay::delegate_relative_pointer; + +use crate::comp::State; + +delegate_relative_pointer!(State); \ No newline at end of file diff --git a/wayland-display-core/src/wayland/handlers/seat.rs b/wayland-display-core/src/wayland/handlers/seat.rs new file mode 100644 index 0000000..ccad3d1 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/seat.rs @@ -0,0 +1,35 @@ +use smithay::{ + delegate_seat, + input::{pointer::CursorImageStatus, Seat, SeatHandler, SeatState}, + reexports::wayland_server::Resource, + wayland::data_device::set_data_device_focus, +}; + +use crate::comp::{FocusTarget, State}; + +impl SeatHandler for State { + type KeyboardFocus = FocusTarget; + type 
PointerFocus = FocusTarget; + + fn seat_state(&mut self) -> &mut SeatState { + &mut self.seat_state + } + + fn focus_changed(&mut self, seat: &Seat, focus: Option<&Self::KeyboardFocus>) { + if let Some(surface) = focus { + let client = match surface { + FocusTarget::Wayland(w) => w.toplevel().wl_surface().client(), + FocusTarget::Popup(p) => p.wl_surface().client(), + }; + set_data_device_focus(&self.dh, seat, client); + } else { + set_data_device_focus(&self.dh, seat, None); + } + } + + fn cursor_image(&mut self, _seat: &Seat, image: CursorImageStatus) { + self.cursor_state = image; + } +} + +delegate_seat!(State); diff --git a/wayland-display-core/src/wayland/handlers/shm.rs b/wayland-display-core/src/wayland/handlers/shm.rs new file mode 100644 index 0000000..4e60077 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/shm.rs @@ -0,0 +1,14 @@ +use smithay::{ + delegate_shm, + wayland::shm::{ShmHandler, ShmState}, +}; + +use crate::comp::State; + +impl ShmHandler for State { + fn shm_state(&self) -> &ShmState { + &self.shm_state + } +} + +delegate_shm!(State); diff --git a/wayland-display-core/src/wayland/handlers/viewporter.rs b/wayland-display-core/src/wayland/handlers/viewporter.rs new file mode 100644 index 0000000..3e5edc3 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/viewporter.rs @@ -0,0 +1,5 @@ +use smithay::delegate_viewporter; + +use crate::comp::State; + +delegate_viewporter!(State); diff --git a/wayland-display-core/src/wayland/handlers/wl_drm.rs b/wayland-display-core/src/wayland/handlers/wl_drm.rs new file mode 100644 index 0000000..4ad8c47 --- /dev/null +++ b/wayland-display-core/src/wayland/handlers/wl_drm.rs @@ -0,0 +1,3 @@ +use crate::{comp::State, wayland::protocols::wl_drm::delegate_wl_drm}; + +delegate_wl_drm!(State); diff --git a/wayland-display-core/src/wayland/handlers/xdg.rs b/wayland-display-core/src/wayland/handlers/xdg.rs new file mode 100644 index 0000000..19b8a6a --- /dev/null +++ 
b/wayland-display-core/src/wayland/handlers/xdg.rs @@ -0,0 +1,83 @@ +use smithay::{ + delegate_xdg_shell, + desktop::{ + find_popup_root_surface, PopupKeyboardGrab, PopupKind, PopupPointerGrab, + PopupUngrabStrategy, Window, + }, + input::{pointer::Focus, Seat}, + reexports::wayland_server::protocol::wl_seat::WlSeat, + utils::Serial, + wayland::{ + seat::WaylandFocus, + shell::xdg::{ + PopupSurface, PositionerState, ToplevelSurface, XdgShellHandler, XdgShellState, + }, + }, +}; + +use crate::comp::{FocusTarget, State}; + +impl XdgShellHandler for State { + fn xdg_shell_state(&mut self) -> &mut XdgShellState { + &mut self.shell_state + } + + fn new_toplevel(&mut self, surface: ToplevelSurface) { + let window = Window::new(surface); + self.pending_windows.push(window); + } + + fn new_popup(&mut self, surface: PopupSurface, positioner: PositionerState) { + // TODO: properly recompute the geometry with the whole of positioner state + surface.with_pending_state(|state| { + // NOTE: This is not really necessary as the default geometry + // is already set the same way, but for demonstrating how + // to set the initial popup geometry this code is left as + // an example + state.geometry = positioner.get_geometry(); + }); + if let Err(err) = self.popups.track_popup(PopupKind::from(surface)) { + tracing::warn!(?err, "Failed to track popup."); + } + } + + fn grab(&mut self, surface: PopupSurface, seat: WlSeat, serial: Serial) { + let seat: Seat = Seat::from_resource(&seat).unwrap(); + let kind = PopupKind::Xdg(surface.clone()); + if let Some(root) = find_popup_root_surface(&kind).ok().and_then(|root| { + self.space + .elements() + .find(|w| w.wl_surface().map(|s| s == root).unwrap_or(false)) + .cloned() + .map(FocusTarget::from) + }) { + let ret = self.popups.grab_popup(root, surface.into(), &seat, serial); + if let Ok(mut grab) = ret { + if let Some(keyboard) = seat.get_keyboard() { + if keyboard.is_grabbed() + && !(keyboard.has_grab(serial) + || 
keyboard.has_grab(grab.previous_serial().unwrap_or(serial))) + { + grab.ungrab(PopupUngrabStrategy::All); + return; + } + keyboard.set_focus(self, grab.current_grab(), serial); + keyboard.set_grab(PopupKeyboardGrab::new(&grab), serial); + } + if let Some(pointer) = seat.get_pointer() { + if pointer.is_grabbed() + && !(pointer.has_grab(serial) + || pointer + .has_grab(grab.previous_serial().unwrap_or_else(|| grab.serial()))) + { + grab.ungrab(PopupUngrabStrategy::All); + return; + } + pointer.set_grab(self, PopupPointerGrab::new(&grab), serial, Focus::Clear); + } + } + } + } +} + +delegate_xdg_shell!(State); diff --git a/wayland-display-core/src/wayland/mod.rs b/wayland-display-core/src/wayland/mod.rs new file mode 100644 index 0000000..5d3c3fd --- /dev/null +++ b/wayland-display-core/src/wayland/mod.rs @@ -0,0 +1,2 @@ +pub mod handlers; +pub mod protocols; diff --git a/wayland-display-core/src/wayland/protocols/mod.rs b/wayland-display-core/src/wayland/protocols/mod.rs new file mode 100644 index 0000000..36d11e7 --- /dev/null +++ b/wayland-display-core/src/wayland/protocols/mod.rs @@ -0,0 +1 @@ +pub mod wl_drm; diff --git a/wayland-display-core/src/wayland/protocols/wl_drm.rs b/wayland-display-core/src/wayland/protocols/wl_drm.rs new file mode 100644 index 0000000..dc39b45 --- /dev/null +++ b/wayland-display-core/src/wayland/protocols/wl_drm.rs @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-3.0-only + +// Re-export only the actual code, and then only use this re-export +// The `generated` module below is just some boilerplate to properly isolate stuff +// and avoid exposing internal details. +// +// You can use all the types from my_protocol as if they went from `wayland_client::protocol`. 
+pub use generated::wl_drm; + +mod generated { + use smithay::reexports::wayland_server::{self, protocol::*}; + + pub mod __interfaces { + use smithay::reexports::wayland_server::protocol::__interfaces::*; + use wayland_backend; + wayland_scanner::generate_interfaces!("resources/protocols/wayland-drm.xml"); + } + use self::__interfaces::*; + + wayland_scanner::generate_server_code!("resources/protocols/wayland-drm.xml"); +} + +use smithay::{ + backend::allocator::{ + dmabuf::{Dmabuf, DmabufFlags}, + Format, Fourcc, Modifier, + }, + reexports::wayland_server::{ + backend::GlobalId, protocol::wl_buffer::WlBuffer, Client, DataInit, Dispatch, + DisplayHandle, GlobalDispatch, New, Resource, + }, + wayland::{ + buffer::BufferHandler, + dmabuf::{DmabufGlobal, DmabufHandler, ImportError}, + }, +}; + +use std::{convert::TryFrom, path::PathBuf, sync::Arc}; + +pub struct WlDrmState; + +/// Data associated with a drm global. +pub struct DrmGlobalData { + filter: Box Fn(&'a Client) -> bool + Send + Sync>, + formats: Arc>, + device_path: PathBuf, + dmabuf_global: DmabufGlobal, +} + +pub struct DrmInstanceData { + formats: Arc>, + dmabuf_global: DmabufGlobal, +} + +impl GlobalDispatch for WlDrmState +where + D: GlobalDispatch + + Dispatch + + BufferHandler + + DmabufHandler + + 'static, +{ + fn bind( + _state: &mut D, + _dh: &DisplayHandle, + _client: &Client, + resource: New, + global_data: &DrmGlobalData, + data_init: &mut DataInit<'_, D>, + ) { + let data = DrmInstanceData { + formats: global_data.formats.clone(), + dmabuf_global: global_data.dmabuf_global.clone(), + }; + let drm_instance = data_init.init(resource, data); + + drm_instance.device(global_data.device_path.to_string_lossy().into_owned()); + if drm_instance.version() >= 2 { + drm_instance.capabilities(wl_drm::Capability::Prime as u32); + } + for format in global_data.formats.iter() { + if let Ok(converted) = wl_drm::Format::try_from(*format as u32) { + drm_instance.format(converted as u32); + } + } + } + + fn 
can_view(client: Client, global_data: &DrmGlobalData) -> bool { + (global_data.filter)(&client) + } +} + +impl Dispatch for WlDrmState +where + D: GlobalDispatch + + Dispatch + + Dispatch + + BufferHandler + + DmabufHandler + + 'static, +{ + fn request( + state: &mut D, + _client: &Client, + drm: &wl_drm::WlDrm, + request: wl_drm::Request, + data: &DrmInstanceData, + _dh: &DisplayHandle, + data_init: &mut DataInit<'_, D>, + ) { + match request { + wl_drm::Request::Authenticate { .. } => drm.authenticated(), + wl_drm::Request::CreateBuffer { .. } => drm.post_error( + wl_drm::Error::InvalidName, + String::from("Flink handles are unsupported, use PRIME"), + ), + wl_drm::Request::CreatePlanarBuffer { .. } => drm.post_error( + wl_drm::Error::InvalidName, + String::from("Flink handles are unsupported, use PRIME"), + ), + wl_drm::Request::CreatePrimeBuffer { + id, + name, + width, + height, + format, + offset0, + stride0, + .. + } => { + let format = match Fourcc::try_from(format) { + Ok(format) => { + if !data.formats.contains(&format) { + drm.post_error( + wl_drm::Error::InvalidFormat, + String::from("Format not advertised by wl_drm"), + ); + return; + } + format + } + Err(_) => { + drm.post_error( + wl_drm::Error::InvalidFormat, + String::from("Format unknown / not advertised by wl_drm"), + ); + return; + } + }; + + if width < 1 || height < 1 { + drm.post_error( + wl_drm::Error::InvalidFormat, + String::from("width or height not positive"), + ); + return; + } + + let mut dma = Dmabuf::builder((width, height), format, DmabufFlags::empty()); + dma.add_plane(name, 0, offset0 as u32, stride0 as u32, Modifier::Invalid); + match dma.build() { + Some(dmabuf) => { + match state.dmabuf_imported(&data.dmabuf_global, dmabuf.clone()) { + Ok(_) => { + // import was successful + data_init.init(id, dmabuf); + } + + Err(ImportError::InvalidFormat) => { + drm.post_error( + wl_drm::Error::InvalidFormat, + "format and plane combination are not valid", + ); + } + + 
Err(ImportError::Failed) => { + // Buffer import failed. The protocol documentation heavily implies killing the + // client is the right thing to do here. + drm.post_error(wl_drm::Error::InvalidName, "buffer import failed"); + } + } + } + None => { + // Buffer import failed. The protocol documentation heavily implies killing the + // client is the right thing to do here. + drm.post_error( + wl_drm::Error::InvalidName, + "dmabuf global was destroyed on server", + ); + } + } + } + } + } +} + +pub fn create_drm_global( + display: &DisplayHandle, + device_path: PathBuf, + formats: Vec, + dmabuf_global: &DmabufGlobal, +) -> GlobalId +where + D: GlobalDispatch + + Dispatch + + BufferHandler + + DmabufHandler + + 'static, +{ + create_drm_global_with_filter::(display, device_path, formats, dmabuf_global, |_| true) +} + +pub fn create_drm_global_with_filter( + display: &DisplayHandle, + device_path: PathBuf, + formats: Vec, + dmabuf_global: &DmabufGlobal, + client_filter: F, +) -> GlobalId +where + D: GlobalDispatch + + Dispatch + + BufferHandler + + DmabufHandler + + 'static, + F: for<'a> Fn(&'a Client) -> bool + Send + Sync + 'static, +{ + let formats = Arc::new( + formats + .into_iter() + .filter(|f| f.modifier == Modifier::Invalid) + .map(|f| f.code) + .collect(), + ); + let data = DrmGlobalData { + filter: Box::new(client_filter), + formats, + device_path, + dmabuf_global: dmabuf_global.clone(), + }; + + display.create_global::(2, data) +} + +macro_rules! delegate_wl_drm { + ($(@<$( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+>)? $ty: ty) => { + smithay::reexports::wayland_server::delegate_global_dispatch!($(@< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $ty: [ + $crate::wayland::protocols::wl_drm::wl_drm::WlDrm: $crate::wayland::protocols::wl_drm::DrmGlobalData + ] => $crate::wayland::protocols::wl_drm::WlDrmState); + smithay::reexports::wayland_server::delegate_dispatch!($(@< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? 
$ty: [ + $crate::wayland::protocols::wl_drm::wl_drm::WlDrm: $crate::wayland::protocols::wl_drm::DrmInstanceData + ] => $crate::wayland::protocols::wl_drm::WlDrmState); + }; +} +pub(crate) use delegate_wl_drm;