Mirror of https://github.com/nestriness/cdc-file-transfer.git, synced 2026-01-30 10:35:37 +02:00
Releasing the former Stadia file transfer tools
The tools allow fast, efficient synchronization of large directory trees from a Windows workstation to a Linux target machine. cdc_rsync* copies files efficiently by using content-defined chunking (CDC) to identify chunks within files that can be reused. asset_stream_manager + cdc_fuse_fs stream a local directory efficiently to a remote virtual file system based on FUSE; they likewise employ CDC to identify and reuse unchanged data chunks.
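To make the CDC idea concrete, here is a minimal, illustrative C++ sketch of content-defined chunking. It is not the repository's actual FastCDC implementation; the constants and the toy rolling hash are assumptions chosen for readability. The point it shows: chunk boundaries depend only on nearby content, so inserting bytes near the start of a file shifts only a chunk or two instead of invalidating every fixed-size block.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch only: cut a chunk wherever a rolling hash matches a bit
// mask. The mask width sets the average chunk size (~8 KiB here); min/max
// bounds keep chunk sizes reasonable.
std::vector<size_t> ChunkBoundaries(const uint8_t* data, size_t size) {
  constexpr size_t kMinSize = 2 * 1024;       // never cut before 2 KiB
  constexpr size_t kMaxSize = 64 * 1024;      // always cut by 64 KiB
  constexpr uint64_t kMask = (1u << 13) - 1;  // ~8 KiB average chunk size
  std::vector<size_t> boundaries;
  uint64_t hash = 0;
  size_t chunk_len = 0;
  for (size_t i = 0; i < size; ++i) {
    hash = (hash << 1) + data[i];  // toy rolling hash; FastCDC uses a gear hash
    ++chunk_len;
    if ((chunk_len >= kMinSize && (hash & kMask) == 0) || chunk_len >= kMaxSize) {
      boundaries.push_back(i + 1);  // chunk ends after byte i
      chunk_len = 0;
      hash = 0;
    }
  }
  if (chunk_len > 0) boundaries.push_back(size);  // trailing partial chunk
  return boundaries;
}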
.bazelrc  (new file, 70 lines)
@@ -0,0 +1,70 @@
build:windows --cxxopt=/std:c++17
build:windows --host_cxxopt=/std:c++17
build:windows --linkopt="/OPT:REF"
build:windows --linkopt="/OPT:ICF"
build:windows --linkopt="/LTCG"

build:windows --copt="/DUNICODE"
build:windows --copt="/D_UNICODE"
build:windows --copt="/W4" # Warning level 4
build:windows --copt="/WX" # Treat warnings as errors
build:windows --copt="/Zc:forScope" # for loop initializer goes out of scope after loop
build:windows --copt="/EHsc" # Catches C++ exceptions only; assuming functions with `extern "C"` linkage never throw
build:windows --copt="/Zc:rvalueCast" # Enforce type conversion rules
build:windows --copt="/Zc:strictStrings" # Disable string literal type conversion

# Warnings occurring in //third_party/grpc/...
build:windows --copt="/wd4018" # signed/unsigned mismatch
build:windows --copt="/wd4090" # different 'const' qualifiers
build:windows --copt="/wd4100" # unreferenced formal parameter
build:windows --copt="/wd4101" # unreferenced local variable
build:windows --copt="/wd4116" # unnamed type definition in parentheses
build:windows --copt="/wd4127" # conditional expression is constant
build:windows --copt="/wd4131" # old-style declarator
build:windows --copt="/wd4146" # unary minus operator applied to unsigned type
build:windows --copt="/wd4200" # nonstandard extension used: zero-sized array in struct/union
build:windows --copt="/wd4201" # nonstandard extension used: nameless struct/union
build:windows --copt="/wd4206" # nonstandard extension used: translation unit is empty
build:windows --copt="/wd4267" # conversion from 'size_t' to 'type', possible loss of data
build:windows --copt="/wd4244" # implicit narrowing conversion
build:windows --copt="/wd4245" # conversion from 'int' to 'uint32_t', signed/unsigned mismatch
build:windows --copt="/wd4310" # cast truncates constant value
build:windows --copt="/wd4312" # 'reinterpret_cast': conversion from ... to ... of greater size
build:windows --copt="/wd4324" # structure was padded due to alignment specifier
build:windows --copt="/wd4334" # result of 32-bit shift implicitly converted to 64 bits
build:windows --copt="/wd4389" # signed/unsigned mismatch
build:windows --copt="/wd4456" # declaration of 'var' hides previous local declaration
build:windows --copt="/wd4457" # declaration of 'var' hides function parameter
build:windows --copt="/wd4458" # declaration hides class member
build:windows --copt="/wd4459" # declaration of 'var' hides global declaration
build:windows --copt="/wd4646" # function declared with 'noreturn' has non-void return type
build:windows --copt="/wd4700" # uninitialized local variable used
build:windows --copt="/wd4701" # potentially uninitialized local variable used
build:windows --copt="/wd4702" # unreachable code
build:windows --copt="/wd4703" # potentially uninitialized local pointer variable used
build:windows --copt="/wd4706" # assignment within conditional expression
build:windows --copt="/wd4715" # not all control paths return a value
build:windows --copt="/wd4805" # unsafe mix of type 'int' and type 'bool' in operation
build:windows --copt="/wd4815" # zero-sized array in stack object will have no elements
build:windows --copt="/wd4834" # discarding return value of function with 'nodiscard' attribute
# Additional warnings occurring in //third_party/protobuf
build:windows --copt="/wd4125" # decimal digit terminates octal escape sequence
# Additional warnings occurring in other third_party libraries
build:windows --copt="/wd4005" # macro redefinition
# Additional warnings occurring in upb (opt-mode)
build:windows --copt="/wd4189" # local variable is initialized but not referenced

# googletest uses this define.
build:windows --define absl=1

# Linux
build:linux --cxxopt=-std=c++17
build:linux --host_cxxopt=-std=c++17
build:linux --define absl=1

# Additional warnings occurring in //third_party/protobuf
build:linux --copt="-Wno-stringop-overflow"
# Additional warnings occurring in boringssl
build:linux --copt="-Wno-array-bounds"

try-import %workspace%/user.bazelrc
.clang-format  (new file, 29 lines)
@@ -0,0 +1,29 @@
---
Language: Proto
BasedOnStyle: Google

---
Language: Cpp
BasedOnStyle: Google

DerivePointerAlignment: false
PointerAlignment: Left
SortIncludes: true
---
Language: JavaScript
BasedOnStyle: Google

AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false

---
Language: TextProto
BasedOnStyle: Google

---
Language: CSharp
BasedOnStyle: Microsoft
ColumnLimit: 100
NamespaceIndentation: All
BreakBeforeTernaryOperators: true
.gitignore  (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
.vs
.vscode
*.log
bin
dependencies
*.cflags
*.config
*.creator*
*.cxxflags
*.files
*.includes
.qtc_clangd
bazel-*
.gitmodules  (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
[submodule "third_party/absl"]
    path = third_party/absl
    url = https://github.com/abseil/abseil-cpp.git
[submodule "third_party/protobuf"]
    path = third_party/protobuf
    url = https://github.com/google/protobuf.git
[submodule "third_party/googletest"]
    path = third_party/googletest
    url = https://github.com/google/googletest.git
[submodule "third_party/grpc"]
    path = third_party/grpc
    url = https://github.com/grpc/grpc.git
LICENSE  (new file, 202 lines)
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

[The remainder of the file is the unmodified, standard Apache License 2.0 text: the terms and conditions (sections 1 through 9), "END OF TERMS AND CONDITIONS", and the appendix explaining how to apply the license to a work.]
NMakeBazelProject.targets  (new file, 60 lines)
@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <!-- Adds support for building Bazel projects as Visual Studio NMake projects.
       Works for both x64 and GGP platforms.

       Usage: Define 4 properties in your project file:
         BazelTargets: Labels of Bazel targets to build, e.g. //common:status.
         BazelOutputFile: Output filename, e.g. cdc_rsync.exe
         BazelSourcePathPrefix: Prefix for source paths, to translate paths relative to (/foo) to project-relative paths (../foo).
                                Must be escaped for sed (e.g. / -> \/), e.g. ..\/..\/..\/
       Optionally, define:
         BazelIncludePaths: Include paths, used for Intellisense.
       Import NMakeCMakeProject.targets. -->

  <!-- Check whether all properties are defined. -->
  <Target Name="CheckProperties" BeforeTargets="PrepareForNMakeBuild">
    <Error Condition="'$(BazelTargets)' == ''" Text="Please define property BazelTargets" />
    <Error Condition="'$(BazelOutputFile)' == ''" Text="Please define property BazelOutputFile" />
    <Error Condition="'$(BazelSourcePathPrefix)' == ''" Text="Please define property BazelSourcePathPrefix" />
  </Target>

  <!-- Define Bazel properties. -->
  <PropertyGroup>
    <BazelPlatform Condition="'$(Platform)'=='x64'">windows</BazelPlatform>
    <BazelPlatform Condition="'$(Platform)'=='GGP'">ggp_windows</BazelPlatform>
    <BazelPlatformDir Condition="'$(Platform)'=='x64'">x64-windows</BazelPlatformDir>
    <BazelPlatformDir Condition="'$(Platform)'=='GGP'">k8</BazelPlatformDir>
    <BazelCompilationMode Condition="'$(Configuration)'=='Debug'">dbg</BazelCompilationMode>
    <BazelCompilationMode Condition="'$(Configuration)'=='Release'">opt</BazelCompilationMode>
    <!-- The sed command converts repo-relative paths (/path/to/foo) to project-relative paths (../path/to/foo), so that errors etc. can be clicked in VS.
         Matches foo:31:8: bar, foo(31): bar, foo(31,32): bar -->
    <BazelSedCommand>| sed -r "s/^([^:\(]+[:\(][[:digit:]]+(,[[:digit:]]+)?[:\)])/$(BazelSourcePathPrefix)\\1/"</BazelSedCommand>
    <!-- Clang prints errors to stderr, so pipe it into stdout. -->
    <BazelSedCommand Condition="'$(Platform)'=='GGP'">2>&1 $(BazelSedCommand)</BazelSedCommand>
    <!-- The workspace_status_command saves about 8 seconds. "exit 0" is roughly equivalent to 'true'. -->
    <BazelArgs>--config=$(BazelPlatform) --workspace_status_command="exit 0" --bes_backend=</BazelArgs>
    <BazelArgs Condition="'$(Configuration)|$(Platform)'=='Release|GGP'">$(BazelArgs) --linkopt=-Wl,--strip-all</BazelArgs>
    <!-- Prevent protobuf recompilation (protobuf is written to "host" by default for both x64 and ggp, causing recompiles). -->
    <BazelArgs Condition="'$(Platform)'=='x64'">$(BazelArgs) --distinct_host_configuration=false</BazelArgs>
    <!-- Windows uses /LTCG (link-time code generation), but no /GL (global optimization), which is a requirement for that. -->
    <BazelArgs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(BazelArgs) --copt=/GL</BazelArgs>
    <!-- Strip unused symbols on GGP. -->
    <BazelArgs Condition="'$(Configuration)|$(Platform)'=='Release|GGP'">$(BazelArgs) --copt=-fdata-sections --copt=-ffunction-sections --linkopt=-Wl,--gc-sections</BazelArgs>
    <!-- VS creates a bazel-out DIRECTORY if it doesn't exist yet and prevents Bazel from creating a bazel-out SYMLINK. -->
    <RmBazelOutDir>cmd.exe /Q /C $(SolutionDir)rm_bazel_out_dir.bat &&</RmBazelOutDir>
    <!-- Bazel output is always read-only, which confuses a bunch of tools that upload binaries to gamelets and fail the second time. -->
    <MakeRW>&& attrib -r $(OutDir)*</MakeRW>
    <!-- Include standard libraries for GGP Intellisense -->
    <BazelIncludePaths Condition="'$(Platform)'=='GGP'">$(GGP_SDK_PATH)BaseSDK\LLVM\10.0.1\include\c++\v1;$(GGP_SDK_PATH)BaseSDK\LLVM\10.0.1\lib\clang\10.0.1\include;$(GGP_SDK_PATH)sysroot\usr\include\x86_64-linux-gnu;$(GGP_SDK_PATH)sysroot\usr\include;$(BazelIncludePaths)</BazelIncludePaths>
  </PropertyGroup>

  <!-- Define NMake properties. -->
  <PropertyGroup>
    <NMakeIncludeSearchPath>$(SolutionDir)..\..\bazel-out\$(BazelPlatformDir)-$(BazelCompilationMode)\bin;$(BazelIncludePaths)</NMakeIncludeSearchPath>
    <NMakeBuildCommandLine>$(RmBazelOutDir) bazel build --compilation_mode=$(BazelCompilationMode) $(BazelArgs) $(BazelTargets) $(BazelSedCommand) $(MakeRW)</NMakeBuildCommandLine>
    <NMakeCleanCommandLine>$(RmBazelOutDir) bazel clean</NMakeCleanCommandLine>
    <NMakeReBuildCommandLine>$(RmBazelOutDir) bazel clean && bazel build --compilation_mode=$(BazelCompilationMode) $(BazelArgs) $(BazelTargets) $(BazelSedCommand) $(MakeRW)</NMakeReBuildCommandLine>
    <NMakeOutput>$(OutDir)$(BazelOutputFile)</NMakeOutput>
  </PropertyGroup>
</Project>
README.md  (new file, 14 lines)
@@ -0,0 +1,14 @@
# CDC File Transfer

This repository contains tools for syncing and streaming files. They are based
on Content Defined Chunking (CDC), in particular
[FastCDC](https://www.usenix.org/conference/atc16/technical-sessions/presentation/xia),
to split up files into chunks.

## CDC RSync
Tool to sync files to a remote machine, similar to the standard Linux
[rsync](https://linux.die.net/man/1/rsync). It supports fast compression and
uses a higher-performing remote diffing approach based on CDC.

## Asset Streaming
Tool to stream assets from a Windows machine to a Linux device.
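To illustrate why CDC helps remote diffing, here is a hedged, minimal C++ sketch. It is not cdc_rsync's actual wire protocol; the struct and function names are invented for this example, and the real tools hash chunks with BLAKE3 (see WORKSPACE) rather than a plain uint64_t. The idea: the remote side reports the hashes of chunks it already stores, and the sender transmits only the chunks whose hashes are missing, so unchanged regions of large files are never re-sent.

#include <cstdint>
#include <string>
#include <unordered_set>
#include <vector>

// Illustrative sketch only: decide which chunks of the local file actually
// need to cross the network, given the chunk hashes the remote already has.
struct Chunk {
  uint64_t hash;      // content hash of the chunk (BLAKE3 in the real tools)
  std::string bytes;  // chunk payload
};

std::vector<Chunk> ChunksToSend(
    const std::vector<Chunk>& local_chunks,
    const std::unordered_set<uint64_t>& remote_hashes) {
  std::vector<Chunk> missing;
  for (const Chunk& chunk : local_chunks) {
    if (remote_hashes.count(chunk.hash) == 0) {
      missing.push_back(chunk);  // remote lacks this chunk; it must be sent
    }
  }
  return missing;  // everything else is rebuilt from chunks already remote
}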
WORKSPACE  (new file, 99 lines)
@@ -0,0 +1,99 @@
workspace(name = "cdc_file_transfer")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "bazel_skylib",
    sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728",
    urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"],
)

http_archive(
    name = "rules_pkg",
    sha256 = "451e08a4d78988c06fa3f9306ec813b836b1d076d0f055595444ba4ff22b867f",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.7.1/rules_pkg-0.7.1.tar.gz",
        "https://github.com/bazelbuild/rules_pkg/releases/download/0.7.1/rules_pkg-0.7.1.tar.gz",
    ],
)

load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

http_archive(
    name = "com_googlesource_code_re2",
    sha256 = "f89c61410a072e5cbcf8c27e3a778da7d6fd2f2b5b1445cd4f4508bee946ab0f",
    strip_prefix = "re2-2022-06-01",
    url = "https://github.com/google/re2/archive/refs/tags/2022-06-01.tar.gz",
)

http_archive(
    name = "com_github_zstd",
    build_file = "@//third_party/zstd:BUILD.bazel",
    sha256 = "f7de13462f7a82c29ab865820149e778cbfe01087b3a55b5332707abf9db4a6e",
    strip_prefix = "zstd-1.5.2",
    url = "https://github.com/facebook/zstd/archive/refs/tags/v1.5.2.tar.gz",
)

http_archive(
    name = "com_github_blake3",
    build_file = "@//third_party/blake3:BUILD.bazel",
    sha256 = "112becf0983b5c83efff07f20b458f2dbcdbd768fd46502e7ddd831b83550109",
    strip_prefix = "BLAKE3-1.3.1",
    url = "https://github.com/BLAKE3-team/BLAKE3/archive/refs/tags/1.3.1.tar.gz",
)

http_archive(
    name = "com_github_fuse",
    build_file = "@//third_party/fuse:BUILD",
    sha256 = "832432d1ad4f833c20e13b57cf40ce5277a9d33e483205fc63c78111b3358874",
    strip_prefix = "fuse-2.9.7",
    patch_args = ["-p1"],
    patches = ["@//third_party/fuse:disable_symbol_versioning.patch"],
    url = "https://github.com/libfuse/libfuse/releases/download/fuse-2.9.7/fuse-2.9.7.tar.gz",
)

http_archive(
    name = "com_github_jsoncpp",
    sha256 = "f409856e5920c18d0c2fb85276e24ee607d2a09b5e7d5f0a371368903c275da2",
    strip_prefix = "jsoncpp-1.9.5",
    url = "https://github.com/open-source-parsers/jsoncpp/archive/refs/tags/1.9.5.tar.gz",
)

# Only required for //cdc_indexer.
http_archive(
    name = "com_github_dirent",
    build_file = "@//third_party/dirent:BUILD.bazel",
    sha256 = "f72d39e3c39610b6901e391b140aa69b51e0eb99216939ed5e547b5dad03afb1",
    strip_prefix = "dirent-1.23.2",
    url = "https://github.com/tronkko/dirent/archive/refs/tags/1.23.2.tar.gz",
)

local_repository(
    name = "com_google_absl",
    path = "third_party/absl",
)

local_repository(
    name = "com_google_googletest",
    path = "third_party/googletest",
)

local_repository(
    name = "com_google_protobuf",
    path = "third_party/protobuf",
)

local_repository(
    name = "com_github_grpc_grpc",
    path = "third_party/grpc",
)

load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")

grpc_deps()

load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps")

grpc_extra_deps()
absl_helper/BUILD  (new file, 12 lines)
@@ -0,0 +1,12 @@
package(default_visibility = ["//visibility:public"])

cc_library(
    name = "jedec_size_flag",
    srcs = ["jedec_size_flag.cc"],
    hdrs = ["jedec_size_flag.h"],
    deps = [
        "@com_google_absl//absl/flags:flag",
        "@com_google_absl//absl/flags:marshalling",
        "@com_google_absl//absl/strings",
    ],
)
absl_helper/jedec_size_flag.cc  (new file, 93 lines)
@@ -0,0 +1,93 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl_helper/jedec_size_flag.h"

namespace cdc_ft {

namespace {

JedecUnit ToJedecUnit(char c) {
  switch (c) {
    case 'b':
    case 'B':
      return JedecUnit::Byte;
    case 'k':
    case 'K':
      return JedecUnit::Kilo;
    case 'm':
    case 'M':
      return JedecUnit::Mega;
    case 'g':
    case 'G':
      return JedecUnit::Giga;
    case 't':
    case 'T':
      return JedecUnit::Tera;
    case 'p':
    case 'P':
      return JedecUnit::Peta;
    default:
      return JedecUnit::Unkown;
  }
}

int LeftShiftAmount(JedecUnit unit) {
  switch (unit) {
    case JedecUnit::Kilo:
      return 10;
    case JedecUnit::Mega:
      return 20;
    case JedecUnit::Giga:
      return 30;
    case JedecUnit::Tera:
      return 40;
    case JedecUnit::Peta:
      return 50;
    default:
      return 0;
  }
}

}  // namespace

bool AbslParseFlag(absl::string_view text, JedecSize* flag, std::string* err) {
  if (text.empty()) return false;
  JedecUnit unit = ToJedecUnit(text.back());
  if (unit != JedecUnit::Unkown) {
    text.remove_suffix(1);
  } else {
    // Are we dealing with a digit character?
    if (text.back() >= '0' && text.back() <= '9') {
      unit = JedecUnit::Byte;
    } else {
      *err =
          "Supported size units are (B)yte, (K)ilo, (M)ega, (G)iga, (T)era, "
          "(P)eta.";
      return false;
    }
  }
  // Try to parse a plain uint64_t value.
  uint64_t size;
  if (!absl::ParseFlag(text, &size, err)) {
    return false;
  }
  flag->SetSize(size << LeftShiftAmount(unit));
  return true;
}

std::string AbslUnparseFlag(const JedecSize& size) {
  return absl::UnparseFlag(size.Size());
}

};  // namespace cdc_ft
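As an illustrative walk-through of the parser above (commentary, not additional repository code): parsing "16M" strips the trailing 'M', parses 16 as a plain uint64_t, and shifts it left by 20 bits, storing 16,777,216 bytes; a bare "1024" ends in a digit, takes the Byte branch, is shifted by 0, and is stored unchanged.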
absl_helper/jedec_size_flag.h  (new file, 61 lines)
@@ -0,0 +1,61 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ABSL_HELPER_JEDEC_SIZE_FLAG_H_
#define ABSL_HELPER_JEDEC_SIZE_FLAG_H_

#include <string>

#include "absl/flags/flag.h"
#include "absl/flags/marshalling.h"
#include "absl/strings/string_view.h"

namespace cdc_ft {

// Supported JEDEC unit suffixes.
enum class JedecUnit : char {
  Unkown = 0,
  Byte = 'B',  // optional
  Kilo = 'K',
  Mega = 'M',
  Giga = 'G',
  Tera = 'T',
  Peta = 'P',
};

// This class parses flag arguments that represent human readable data sizes,
// such as 1024, 2K, 3M, 4G, or 5T.
//
// See https://en.wikipedia.org/wiki/JEDEC_memory_standards.
class JedecSize {
 public:
  explicit JedecSize(uint64_t size = 0) : size_(size) {}
  uint64_t Size() const { return size_; }
  void SetSize(uint64_t size) { size_ = size; }

 private:
  uint64_t size_;
};

// Abseil flags parser for JedecSize.
bool AbslParseFlag(absl::string_view text, JedecSize* flag, std::string* err);

// Abseil flags unparser for JedecSize.
std::string AbslUnparseFlag(const JedecSize& size);

};  // namespace cdc_ft

#endif  // ABSL_HELPER_JEDEC_SIZE_FLAG_H_
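A minimal usage sketch, assuming a hypothetical flag name (max_cache_size is invented for this example and is not defined in the repository): because AbslParseFlag and AbslUnparseFlag are declared for JedecSize, Abseil's flag machinery finds them via argument-dependent lookup, so a flag of this type accepts values such as 512M or 4G directly on the command line.

#include <cstdint>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl_helper/jedec_size_flag.h"

// Hypothetical flag for illustration; defaults to 1 GiB.
ABSL_FLAG(cdc_ft::JedecSize, max_cache_size, cdc_ft::JedecSize(uint64_t{1} << 30),
          "Maximum cache size, e.g. 500M or 2G.");

int main(int argc, char** argv) {
  absl::ParseCommandLine(argc, argv);
  // --max_cache_size=4G yields 4 * 2^30 bytes here.
  const uint64_t bytes = absl::GetFlag(FLAGS_max_cache_size).Size();
  return bytes > 0 ? 0 : 1;
}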
all_files.vcxitems  (new file, 274 lines)
@@ -0,0 +1,274 @@
<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Label="Globals">
    <MSBuildAllProjects>$(MSBuildAllProjects);$(MSBuildThisFileFullPath)</MSBuildAllProjects>
    <HasSharedItems>true</HasSharedItems>
    <ItemsProjectGuid>{f542af2d-5a17-4f55-be40-b1a2a6182811}</ItemsProjectGuid>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(MSBuildThisFileDirectory)</AdditionalIncludeDirectories>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ProjectCapability Include="SourceItemsFromImports" />
  </ItemGroup>
  [Four <ItemGroup> blocks follow, enumerating every repository file relative to $(MSBuildThisFileDirectory): the .cc sources and tests of absl_helper, asset_stream_manager, cdc_fuse_fs, cdc_indexer, common, data_store, cdc_rsync (including base), cdc_rsync_cli, cdc_rsync_server, manifest, and metrics as <ClCompile> items; the corresponding .h headers as <ClInclude> items; the BUILD files, protos, .bazelrc, .gitignore, WORKSPACE, NMakeBazelProject.targets, rm_bazel_out_dir.bat, READMEs, and tools/windows_cc_library.bzl as <None> items; and manifest.natvis plus protobuf.natvis as <Natvis> items.]
</Project>
6
all_files.vcxitems.user
Normal file
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="Current" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup>
    <ShowAllFiles>true</ShowAllFiles>
  </PropertyGroup>
</Project>
3
asset_stream_manager/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
x64/*
*.log
*.user
186
asset_stream_manager/BUILD
Normal file
@@ -0,0 +1,186 @@
package(default_visibility = [
|
||||||
|
"//:__subpackages__",
|
||||||
|
])
|
||||||
|
|
||||||
|
cc_binary(
|
||||||
|
name = "asset_stream_manager",
|
||||||
|
srcs = ["main.cc"],
|
||||||
|
data = [":roots_pem"],
|
||||||
|
deps = [
|
||||||
|
":asset_stream_config",
|
||||||
|
":session_management_server",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:sdk_util",
|
||||||
|
"//data_store:data_provider",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "asset_stream_server",
|
||||||
|
srcs = [
|
||||||
|
"asset_stream_server.cc",
|
||||||
|
"grpc_asset_stream_server.cc",
|
||||||
|
"testing_asset_stream_server.cc",
|
||||||
|
],
|
||||||
|
hdrs = [
|
||||||
|
"asset_stream_server.h",
|
||||||
|
"grpc_asset_stream_server.h",
|
||||||
|
"testing_asset_stream_server.h",
|
||||||
|
],
|
||||||
|
deps = [
|
||||||
|
"//common:grpc_status",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:status",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:thread_safe_map",
|
||||||
|
"//data_store",
|
||||||
|
"//manifest:manifest_updater",
|
||||||
|
"//proto:asset_stream_service_grpc_proto",
|
||||||
|
"@com_google_absl//absl/strings:str_format",
|
||||||
|
"@com_google_absl//absl/time",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "asset_stream_config",
|
||||||
|
srcs = ["asset_stream_config.cc"],
|
||||||
|
hdrs = ["asset_stream_config.h"],
|
||||||
|
deps = [
|
||||||
|
":multi_session",
|
||||||
|
"//absl_helper:jedec_size_flag",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:status_macros",
|
||||||
|
"@com_github_jsoncpp//:jsoncpp",
|
||||||
|
"@com_google_absl//absl/flags:parse",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "cdc_fuse_manager",
|
||||||
|
srcs = ["cdc_fuse_manager.cc"],
|
||||||
|
hdrs = ["cdc_fuse_manager.h"],
|
||||||
|
deps = [
|
||||||
|
"//cdc_fuse_fs:constants",
|
||||||
|
"//common:gamelet_component",
|
||||||
|
"//common:remote_util",
|
||||||
|
"//common:status_macros",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
"@com_google_absl//absl/strings:str_format",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "session_management_server",
|
||||||
|
srcs = [
|
||||||
|
"background_service_impl.cc",
|
||||||
|
"background_service_impl.h",
|
||||||
|
"local_assets_stream_manager_service_impl.cc",
|
||||||
|
"local_assets_stream_manager_service_impl.h",
|
||||||
|
"session_management_server.cc",
|
||||||
|
"session_manager.cc",
|
||||||
|
"session_manager.h",
|
||||||
|
],
|
||||||
|
hdrs = ["session_management_server.h"],
|
||||||
|
deps = [
|
||||||
|
":multi_session",
|
||||||
|
"//common:grpc_status",
|
||||||
|
"//common:log",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:util",
|
||||||
|
"//manifest:manifest_updater",
|
||||||
|
"//metrics",
|
||||||
|
"//proto:background_service_grpc_proto",
|
||||||
|
"//proto:local_assets_stream_manager_grpc_proto",
|
||||||
|
"@com_google_absl//absl/strings",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "multi_session",
|
||||||
|
srcs = [
|
||||||
|
"multi_session.cc",
|
||||||
|
"session.cc",
|
||||||
|
"session.h",
|
||||||
|
],
|
||||||
|
hdrs = [
|
||||||
|
"multi_session.h",
|
||||||
|
"session_config.h",
|
||||||
|
],
|
||||||
|
deps = [
|
||||||
|
":asset_stream_server",
|
||||||
|
":cdc_fuse_manager",
|
||||||
|
":metrics_recorder",
|
||||||
|
"//common:file_watcher",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:port_manager",
|
||||||
|
"//common:process",
|
||||||
|
"//common:remote_util",
|
||||||
|
"//common:sdk_util",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:stopwatch",
|
||||||
|
"//data_store:disk_data_store",
|
||||||
|
"//manifest:manifest_printer",
|
||||||
|
"//manifest:manifest_updater",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "multi_session_test",
|
||||||
|
srcs = ["multi_session_test.cc"],
|
||||||
|
data = [":all_test_data"],
|
||||||
|
deps = [
|
||||||
|
":multi_session",
|
||||||
|
"//common:test_main",
|
||||||
|
"//manifest:manifest_test_base",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "metrics_recorder",
|
||||||
|
srcs = ["metrics_recorder.cc"],
|
||||||
|
hdrs = ["metrics_recorder.h"],
|
||||||
|
deps = [
|
||||||
|
"//common:log",
|
||||||
|
"//common:util",
|
||||||
|
"//metrics",
|
||||||
|
"//metrics:enums",
|
||||||
|
"//metrics:messages",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "metrics_recorder_test",
|
||||||
|
srcs = ["metrics_recorder_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":metrics_recorder",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//common:test_main",
|
||||||
|
"//metrics",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Copy roots.pem to the output folder, required for authenticated gRPC.
|
||||||
|
genrule(
|
||||||
|
name = "roots_pem",
|
||||||
|
srcs = ["@com_github_grpc_grpc//:root_certificates"],
|
||||||
|
outs = ["roots.pem"],
|
||||||
|
cmd = "cp $(location @com_github_grpc_grpc//:root_certificates) $(location roots.pem)",
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_sources",
|
||||||
|
srcs = glob(["*_test.cc"]),
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_data",
|
||||||
|
srcs = glob(["testdata/**"]),
|
||||||
|
)
184
asset_stream_manager/asset_stream_config.cc
Normal file
@@ -0,0 +1,184 @@
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/asset_stream_config.h"
|
||||||
|
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
|
#include "absl/flags/flag.h"
|
||||||
|
#include "absl/flags/parse.h"
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "absl/strings/str_join.h"
|
||||||
|
#include "absl_helper/jedec_size_flag.h"
|
||||||
|
#include "common/buffer.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "json/json.h"
|
||||||
|
|
||||||
|
ABSL_DECLARE_FLAG(std::string, src_dir);
|
||||||
|
ABSL_DECLARE_FLAG(std::string, instance_ip);
|
||||||
|
ABSL_DECLARE_FLAG(uint16_t, instance_port);
|
||||||
|
ABSL_DECLARE_FLAG(int, verbosity);
|
||||||
|
ABSL_DECLARE_FLAG(bool, debug);
|
||||||
|
ABSL_DECLARE_FLAG(bool, singlethreaded);
|
||||||
|
ABSL_DECLARE_FLAG(bool, stats);
|
||||||
|
ABSL_DECLARE_FLAG(bool, quiet);
|
||||||
|
ABSL_DECLARE_FLAG(bool, check);
|
||||||
|
ABSL_DECLARE_FLAG(bool, log_to_stdout);
|
||||||
|
ABSL_DECLARE_FLAG(cdc_ft::JedecSize, cache_capacity);
|
||||||
|
ABSL_DECLARE_FLAG(uint32_t, cleanup_timeout);
|
||||||
|
ABSL_DECLARE_FLAG(uint32_t, access_idle_timeout);
|
||||||
|
ABSL_DECLARE_FLAG(int, manifest_updater_threads);
|
||||||
|
ABSL_DECLARE_FLAG(int, file_change_wait_duration_ms);
|
||||||
|
|
||||||
|
// Declare AS20 flags, so that AS30 can be used on older SDKs simply by
|
||||||
|
// replacing the binary. Note that the RETIRED_FLAGS macro can't be used
|
||||||
|
// because the flags contain dashes. This code mimics the macro.
|
||||||
|
absl::flags_internal::RetiredFlag<std::string> RETIRED_FLAGS_session_ports;
|
||||||
|
absl::flags_internal::RetiredFlag<std::string> RETIRED_FLAGS_gm_mount_point;
|
||||||
|
absl::flags_internal::RetiredFlag<bool> RETIRED_FLAGS_allow_edge;
|
||||||
|
|
||||||
|
const auto RETIRED_FLAGS_REG_session_ports =
|
||||||
|
(RETIRED_FLAGS_session_ports.Retire("session-ports"),
|
||||||
|
::absl::flags_internal::FlagRegistrarEmpty{});
|
||||||
|
const auto RETIRED_FLAGS_REG_gm_mount_point =
|
||||||
|
(RETIRED_FLAGS_gm_mount_point.Retire("gamelet-mount-point"),
|
||||||
|
::absl::flags_internal::FlagRegistrarEmpty{});
|
||||||
|
const auto RETIRED_FLAGS_REG_allow_edge =
|
||||||
|
(RETIRED_FLAGS_allow_edge.Retire("allow-edge"),
|
||||||
|
::absl::flags_internal::FlagRegistrarEmpty{});
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
AssetStreamConfig::AssetStreamConfig() {
|
||||||
|
src_dir_ = absl::GetFlag(FLAGS_src_dir);
|
||||||
|
instance_ip_ = absl::GetFlag(FLAGS_instance_ip);
|
||||||
|
instance_port_ = absl::GetFlag(FLAGS_instance_port);
|
||||||
|
session_cfg_.verbosity = absl::GetFlag(FLAGS_verbosity);
|
||||||
|
session_cfg_.fuse_debug = absl::GetFlag(FLAGS_debug);
|
||||||
|
session_cfg_.fuse_singlethreaded = absl::GetFlag(FLAGS_singlethreaded);
|
||||||
|
session_cfg_.stats = absl::GetFlag(FLAGS_stats);
|
||||||
|
session_cfg_.quiet = absl::GetFlag(FLAGS_quiet);
|
||||||
|
session_cfg_.fuse_check = absl::GetFlag(FLAGS_check);
|
||||||
|
log_to_stdout_ = absl::GetFlag(FLAGS_log_to_stdout);
|
||||||
|
session_cfg_.fuse_cache_capacity = absl::GetFlag(FLAGS_cache_capacity).Size();
|
||||||
|
session_cfg_.fuse_cleanup_timeout_sec = absl::GetFlag(FLAGS_cleanup_timeout);
|
||||||
|
session_cfg_.fuse_access_idle_timeout_sec =
|
||||||
|
absl::GetFlag(FLAGS_access_idle_timeout);
|
||||||
|
session_cfg_.manifest_updater_threads =
|
||||||
|
absl::GetFlag(FLAGS_manifest_updater_threads);
|
||||||
|
session_cfg_.file_change_wait_duration_ms =
|
||||||
|
absl::GetFlag(FLAGS_file_change_wait_duration_ms);
|
||||||
|
}
|
||||||
|
|
||||||
|
AssetStreamConfig::~AssetStreamConfig() = default;
|
||||||
|
|
||||||
|
absl::Status AssetStreamConfig::LoadFromFile(const std::string& path) {
|
||||||
|
Buffer buffer;
|
||||||
|
RETURN_IF_ERROR(path::ReadFile(path, &buffer));
|
||||||
|
|
||||||
|
Json::Value config;
|
||||||
|
Json::Reader reader;
|
||||||
|
if (!reader.parse(buffer.data(), buffer.data() + buffer.size(), config,
|
||||||
|
false)) {
|
||||||
|
return absl::InvalidArgumentError(
|
||||||
|
absl::StrFormat("Failed to parse config file '%s': %s", path,
|
||||||
|
reader.getFormattedErrorMessages()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#define ASSIGN_VAR(var, flag, type) \
|
||||||
|
do { \
|
||||||
|
if (config.isMember(#flag)) { \
|
||||||
|
var = config[#flag].as##type(); \
|
||||||
|
flags_read_from_file_.insert(#flag); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
ASSIGN_VAR(src_dir_, src_dir, String);
|
||||||
|
ASSIGN_VAR(session_cfg_.verbosity, verbosity, Int);
|
||||||
|
ASSIGN_VAR(session_cfg_.fuse_debug, debug, Bool);
|
||||||
|
ASSIGN_VAR(session_cfg_.fuse_singlethreaded, singlethreaded, Bool);
|
||||||
|
ASSIGN_VAR(session_cfg_.stats, stats, Bool);
|
||||||
|
ASSIGN_VAR(session_cfg_.quiet, quiet, Bool);
|
||||||
|
ASSIGN_VAR(session_cfg_.fuse_check, check, Bool);
|
||||||
|
ASSIGN_VAR(log_to_stdout_, log_to_stdout, Bool);
|
||||||
|
ASSIGN_VAR(session_cfg_.fuse_cleanup_timeout_sec, cleanup_timeout, Int);
|
||||||
|
ASSIGN_VAR(session_cfg_.fuse_access_idle_timeout_sec, access_idle_timeout,
|
||||||
|
Int);
|
||||||
|
ASSIGN_VAR(session_cfg_.manifest_updater_threads, manifest_updater_threads,
|
||||||
|
Int);
|
||||||
|
ASSIGN_VAR(session_cfg_.file_change_wait_duration_ms,
|
||||||
|
file_change_wait_duration_ms, Int);
|
||||||
|
|
||||||
|
// cache_capacity requires Jedec size conversion.
|
||||||
|
constexpr char kCacheCapacity[] = "cache_capacity";
|
||||||
|
if (config.isMember(kCacheCapacity)) {
|
||||||
|
JedecSize cache_capacity;
|
||||||
|
std::string error;
|
||||||
|
if (AbslParseFlag(config[kCacheCapacity].asString(), &cache_capacity,
|
||||||
|
&error)) {
|
||||||
|
session_cfg_.fuse_cache_capacity = cache_capacity.Size();
|
||||||
|
flags_read_from_file_.insert(kCacheCapacity);
|
||||||
|
} else {
|
||||||
|
// Note that |error| can't be logged here since this code runs before
|
||||||
|
// logging is initialized.
|
||||||
|
flag_read_errors_[kCacheCapacity] = error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef ASSIGN_VAR
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string AssetStreamConfig::ToString() {
|
||||||
|
std::ostringstream ss;
|
||||||
|
ss << "src_dir = " << src_dir_ << std::endl;
|
||||||
|
ss << "verbosity = " << session_cfg_.verbosity
|
||||||
|
<< std::endl;
|
||||||
|
ss << "debug = " << session_cfg_.fuse_debug
|
||||||
|
<< std::endl;
|
||||||
|
ss << "singlethreaded = " << session_cfg_.fuse_singlethreaded
|
||||||
|
<< std::endl;
|
||||||
|
ss << "stats = " << session_cfg_.stats << std::endl;
|
||||||
|
ss << "quiet = " << session_cfg_.quiet << std::endl;
|
||||||
|
ss << "check = " << session_cfg_.fuse_check
|
||||||
|
<< std::endl;
|
||||||
|
ss << "log_to_stdout = " << log_to_stdout_ << std::endl;
|
||||||
|
ss << "cache_capacity = " << session_cfg_.fuse_cache_capacity
|
||||||
|
<< std::endl;
|
||||||
|
ss << "cleanup_timeout = "
|
||||||
|
<< session_cfg_.fuse_cleanup_timeout_sec << std::endl;
|
||||||
|
ss << "access_idle_timeout = "
|
||||||
|
<< session_cfg_.fuse_access_idle_timeout_sec << std::endl;
|
||||||
|
ss << "manifest_updater_threads = "
|
||||||
|
<< session_cfg_.manifest_updater_threads << std::endl;
|
||||||
|
ss << "file_change_wait_duration_ms = "
|
||||||
|
<< session_cfg_.file_change_wait_duration_ms << std::endl;
|
||||||
|
return ss.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string AssetStreamConfig::GetFlagsReadFromFile() {
|
||||||
|
return absl::StrJoin(flags_read_from_file_, ", ");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string AssetStreamConfig::GetFlagReadErrors() {
|
||||||
|
std::string error_str;
|
||||||
|
for (const auto& [flag, error] : flag_read_errors_)
|
||||||
|
error_str += absl::StrFormat("%sFailed to read '%s': %s",
|
||||||
|
error_str.empty() ? "" : "\n", flag, error);
|
||||||
|
return error_str;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
107
asset_stream_manager/asset_stream_config.h
Normal file
@@ -0,0 +1,107 @@
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_ASSET_STREAM_CONFIG_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_ASSET_STREAM_CONFIG_H_
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <set>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "asset_stream_manager/session_config.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Class containing all configuration settings for asset streaming.
|
||||||
|
// Reads flags from the command line and optionally applies overrides from
|
||||||
|
// a json file.
|
||||||
|
class AssetStreamConfig {
|
||||||
|
public:
|
||||||
|
// Constructs the configuration by applying command line flags.
|
||||||
|
AssetStreamConfig();
|
||||||
|
~AssetStreamConfig();
|
||||||
|
|
||||||
|
// Loads a configuration from the JSON file at |path| and overrides any config
|
||||||
|
// values that are set in this file. Sample json file:
|
||||||
|
// {
|
||||||
|
// "src_dir":"C:\\path\\to\\assets",
|
||||||
|
// "verbosity":3,
|
||||||
|
// "debug":0,
|
||||||
|
// "singlethreaded":0,
|
||||||
|
// "stats":0,
|
||||||
|
// "quiet":0,
|
||||||
|
// "check":0,
|
||||||
|
// "log_to_stdout":0,
|
||||||
|
// "cache_capacity":"150G",
|
||||||
|
// "cleanup_timeout":300,
|
||||||
|
// "access_idle_timeout":5,
|
||||||
|
// "manifest_updater_threads":4,
|
||||||
|
// "file_change_wait_duration_ms":500
|
||||||
|
// }
|
||||||
|
// Returns NotFoundError if the file does not exist.
|
||||||
|
// Returns InvalidArgumentError if the file is not valid JSON.
|
||||||
|
absl::Status LoadFromFile(const std::string& path);
|
||||||
|
|
||||||
|
// Returns a string with all config values, suitable for logging.
|
||||||
|
std::string ToString();
|
||||||
|
|
||||||
|
// Gets a comma-separated list of flags that were read from the JSON file.
|
||||||
|
// These flags override command line flags.
|
||||||
|
std::string GetFlagsReadFromFile();
|
||||||
|
|
||||||
|
// Gets a newline-separated list of errors for each flag that could not be
|
||||||
|
// read from the JSON file.
|
||||||
|
std::string GetFlagReadErrors();
|
||||||
|
|
||||||
|
// Workstation directory to stream. Should usually be empty since mounts are
|
||||||
|
// triggered by the CLI or the partner portal via a gRPC call, but useful
|
||||||
|
// during development.
|
||||||
|
const std::string& src_dir() const { return src_dir_; }
|
||||||
|
|
||||||
|
// IP address of the instance to stream to. Should usually be empty since
|
||||||
|
// mounts are triggered by the CLI or the partner portal via a gRPC call, but
|
||||||
|
// useful during development.
|
||||||
|
const std::string& instance_ip() const { return instance_ip_; }
|
||||||
|
|
||||||
|
  // Port of the instance to stream to. Should usually be unset (0) since
  // mounts are triggered by the CLI or the partner portal via a gRPC call, but
  // useful during development.
|
||||||
|
const uint16_t instance_port() const { return instance_port_; }
|
||||||
|
|
||||||
|
// Session configuration.
|
||||||
|
const SessionConfig session_cfg() const { return session_cfg_; }
|
||||||
|
|
||||||
|
// Whether to log to a file or to stdout.
|
||||||
|
bool log_to_stdout() const { return log_to_stdout_; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::string src_dir_;
|
||||||
|
std::string instance_ip_;
|
||||||
|
uint16_t instance_port_ = 0;
|
||||||
|
SessionConfig session_cfg_;
|
||||||
|
bool log_to_stdout_ = false;
|
||||||
|
|
||||||
|
// Use a set, so the flags are sorted alphabetically.
|
||||||
|
std::set<std::string> flags_read_from_file_;
|
||||||
|
|
||||||
|
  // Maps each flag to the error that occurred while reading it.
|
||||||
|
std::map<std::string, std::string> flag_read_errors_;
|
||||||
|
};
|
||||||
|
|
||||||
|
}  // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_ASSET_STREAM_CONFIG_H_
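Editor's note: the class above layers JSON overrides on top of command line flags. A minimal sketch of how a caller might drive it follows; the bare main(), the file name "asset_stream_config.json", and the omitted flag parsing are illustrative assumptions, not part of this commit.

// Illustrative sketch only; the ABSL_FLAG definitions normally live in main.cc.
#include "absl/status/status.h"
#include "asset_stream_manager/asset_stream_config.h"

int main(int argc, char* argv[]) {
  // absl::ParseCommandLine(argc, argv) would run before this in real code.
  cdc_ft::AssetStreamConfig config;  // Captures the parsed flag values.

  // Optionally override flags from a JSON file (name is a placeholder).
  absl::Status status = config.LoadFromFile("asset_stream_config.json");
  if (!status.ok() && !absl::IsNotFound(status)) {
    return 1;  // Malformed JSON or an unreadable file.
  }

  // config.src_dir(), config.session_cfg() etc. now reflect flags + overrides.
  return 0;
}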
90
asset_stream_manager/asset_stream_manager.vcxproj
Normal file
@@ -0,0 +1,90 @@
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||||
|
<ItemGroup Label="ProjectConfigurations">
|
||||||
|
<ProjectConfiguration Include="Debug|x64">
|
||||||
|
<Configuration>Debug</Configuration>
|
||||||
|
<Platform>x64</Platform>
|
||||||
|
</ProjectConfiguration>
|
||||||
|
<ProjectConfiguration Include="Release|x64">
|
||||||
|
<Configuration>Release</Configuration>
|
||||||
|
<Platform>x64</Platform>
|
||||||
|
</ProjectConfiguration>
|
||||||
|
</ItemGroup>
|
||||||
|
<PropertyGroup Label="Globals">
|
||||||
|
<VCProjectVersion>15.0</VCProjectVersion>
|
||||||
|
<ProjectGuid>{84D81562-D66C-4A60-9F48-2696D7D81D26}</ProjectGuid>
|
||||||
|
<Keyword>Win32Proj</Keyword>
|
||||||
|
<RootNamespace>cdc_rsync</RootNamespace>
|
||||||
|
<WindowsTargetPlatformVersion Condition="$(VisualStudioVersion) == 15">$([Microsoft.Build.Utilities.ToolLocationHelper]::GetLatestSDKTargetPlatformVersion('Windows', '10.0'))</WindowsTargetPlatformVersion>
|
||||||
|
<WindowsTargetPlatformVersion Condition="$(VisualStudioVersion) == 16">10.0</WindowsTargetPlatformVersion>
|
||||||
|
</PropertyGroup>
|
||||||
|
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||||
|
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
|
||||||
|
<ConfigurationType>Makefile</ConfigurationType>
|
||||||
|
<UseDebugLibraries>true</UseDebugLibraries>
|
||||||
|
<PlatformToolset Condition="$(VisualStudioVersion) == 15">v141</PlatformToolset>
|
||||||
|
<PlatformToolset Condition="$(VisualStudioVersion) == 16">v142</PlatformToolset>
|
||||||
|
</PropertyGroup>
|
||||||
|
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||||
|
<ConfigurationType>Makefile</ConfigurationType>
|
||||||
|
<UseDebugLibraries>false</UseDebugLibraries>
|
||||||
|
<PlatformToolset Condition="$(VisualStudioVersion) == 15">v141</PlatformToolset>
|
||||||
|
<PlatformToolset Condition="$(VisualStudioVersion) == 16">v142</PlatformToolset>
|
||||||
|
</PropertyGroup>
|
||||||
|
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||||
|
<ImportGroup Label="Shared">
|
||||||
|
<Import Project="..\all_files.vcxitems" Label="Shared" />
|
||||||
|
</ImportGroup>
|
||||||
|
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||||
|
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||||
|
</ImportGroup>
|
||||||
|
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||||
|
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||||
|
</ImportGroup>
|
||||||
|
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||||
|
<OutDir>$(SolutionDir)bazel-out\x64_windows-dbg\bin\asset_stream_manager\</OutDir>
|
||||||
|
<NMakePreprocessorDefinitions>UNICODE</NMakePreprocessorDefinitions>
|
||||||
|
<AdditionalOptions>/std:c++17</AdditionalOptions>
|
||||||
|
</PropertyGroup>
|
||||||
|
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||||
|
<OutDir>$(SolutionDir)bazel-out\x64_windows-opt\bin\asset_stream_manager\</OutDir>
|
||||||
|
<NMakePreprocessorDefinitions>UNICODE</NMakePreprocessorDefinitions>
|
||||||
|
<AdditionalOptions>/std:c++17</AdditionalOptions>
|
||||||
|
</PropertyGroup>
|
||||||
|
<ItemGroup>
|
||||||
|
<ProjectReference Include="..\cdc_fuse_fs\cdc_fuse_fs.vcxproj">
|
||||||
|
<Project>{a537310c-0571-43d5-b7fe-c867f702294f}</Project>
|
||||||
|
<ReferenceOutputAssembly>false</ReferenceOutputAssembly>
|
||||||
|
<LinkLibraryDependencies>false</LinkLibraryDependencies>
|
||||||
|
</ProjectReference>
|
||||||
|
</ItemGroup>
|
||||||
|
<!-- Prevent console from being closed -->
|
||||||
|
<ItemDefinitionGroup>
|
||||||
|
<Link>
|
||||||
|
<SubSystem>Console</SubSystem>
|
||||||
|
</Link>
|
||||||
|
</ItemDefinitionGroup>
|
||||||
|
<!-- Bazel setup -->
|
||||||
|
<PropertyGroup>
|
||||||
|
<BazelTargets>//asset_stream_manager</BazelTargets>
|
||||||
|
<BazelOutputFile>asset_stream_manager.exe</BazelOutputFile>
|
||||||
|
<BazelIncludePaths>..\;..\third_party\absl;..\third_party\jsoncpp\include;..\third_party\blake3\c;..\third_party\googletest\googletest\include;..\third_party\protobuf\src;..\third_party\grpc\include;..\bazel-out\x64_windows-dbg\bin;$(VC_IncludePath);$(WindowsSDK_IncludePath)</BazelIncludePaths>
|
||||||
|
<BazelSourcePathPrefix>..\/</BazelSourcePathPrefix>
|
||||||
|
</PropertyGroup>
|
||||||
|
<Import Project="..\NMakeBazelProject.targets" />
|
||||||
|
<!-- For some reason, msbuild doesn't include this file, so copy it explicitly. -->
|
||||||
|
<!-- TODO: Reenable once we can cross-compile these.
|
||||||
|
<PropertyGroup>
|
||||||
|
<CdcFuseFsFile>$(SolutionDir)bazel-out\k8-$(BazelCompilationMode)\bin\cdc_fuse_fs\cdc_fuse_fs</CdcFuseFsFile>
|
||||||
|
<LibFuseFile>$(SolutionDir)bazel-out\k8-$(BazelCompilationMode)\bin\third_party\fuse\libfuse.so</LibFuseFile>
|
||||||
|
</PropertyGroup>
|
||||||
|
<Target Name="CopyCdcFuseFs" Inputs="$(CdcFuseFsFile)" Outputs="$(OutDir)cdc_fuse_fs" AfterTargets="Build">
|
||||||
|
<Copy SourceFiles="$(CdcFuseFsFile)" DestinationFiles="$(OutDir)cdc_fuse_fs" />
|
||||||
|
</Target>
|
||||||
|
<Target Name="CopyLibFuse" Inputs="$(LibFuseFile)" Outputs="$(OutDir)libfuse.so" AfterTargets="Build">
|
||||||
|
<Copy SourceFiles="$(LibFuseFile)" DestinationFiles="$(OutDir)libfuse.so" />
|
||||||
|
</Target> -->
|
||||||
|
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||||
|
<ImportGroup Label="ExtensionTargets">
|
||||||
|
</ImportGroup>
|
||||||
|
</Project>
@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" />
41
asset_stream_manager/asset_stream_server.cc
Normal file
@@ -0,0 +1,41 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/asset_stream_server.h"

#include "asset_stream_manager/grpc_asset_stream_server.h"
#include "asset_stream_manager/testing_asset_stream_server.h"

namespace cdc_ft {

AssetStreamServer::AssetStreamServer(std::string src_dir,
                                     DataStoreReader* data_store_reader,
                                     FileChunkMap* file_chunks) {}

std::unique_ptr<AssetStreamServer> AssetStreamServer::Create(
    AssetStreamServerType type, std::string src_dir,
    DataStoreReader* data_store_reader, FileChunkMap* file_chunks,
    ContentSentHandler content_sent) {
  switch (type) {
    case AssetStreamServerType::kGrpc:
      return std::make_unique<GrpcAssetStreamServer>(src_dir, data_store_reader,
                                                     file_chunks, content_sent);
    case AssetStreamServerType::kTest:
      return std::make_unique<TestingAssetStreamServer>(
          src_dir, data_store_reader, file_chunks);
  }
  assert(false);
  return nullptr;
}
}  // namespace cdc_ft
91
asset_stream_manager/asset_stream_server.h
Normal file
@@ -0,0 +1,91 @@
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_ASSET_STREAM_SERVER_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_ASSET_STREAM_SERVER_H_
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/time/time.h"
|
||||||
|
#include "manifest/manifest_proto_defs.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Handles an event when content is transmitted from the workstation to a
|
||||||
|
// gamelet.
|
||||||
|
// |byte_count| number of bytes transferred during the session so far.
|
||||||
|
// |chunk_count| number of chunks transferred during the session so far.
|
||||||
|
// |instance_id| instance id, which identifies the session.
|
||||||
|
using ContentSentHandler = std::function<void(
|
||||||
|
size_t byte_count, size_t chunk_count, std::string instance_id)>;
|
||||||
|
|
||||||
|
class DataStoreReader;
|
||||||
|
class FileChunkMap;
|
||||||
|
|
||||||
|
enum class AssetStreamServerType { kGrpc, kTest };
|
||||||
|
|
||||||
|
class AssetStreamServer {
|
||||||
|
public:
|
||||||
|
// Returns AssetStreamServer of |type|.
|
||||||
|
// |src_dir| is the directory on the workstation to mount.
|
||||||
|
// |data_store_reader| is responsible for loading content by ID.
|
||||||
|
// |file_chunks| is used for mapping data chunk ids to file locations.
|
||||||
|
// |content_sent| handles event when data is transferred from the workstation
|
||||||
|
// to a gamelet.
|
||||||
|
static std::unique_ptr<AssetStreamServer> Create(
|
||||||
|
AssetStreamServerType type, std::string src_dir,
|
||||||
|
DataStoreReader* data_store_reader, FileChunkMap* file_chunks,
|
||||||
|
ContentSentHandler content_sent);
|
||||||
|
|
||||||
|
AssetStreamServer(const AssetStreamServer& other) = delete;
|
||||||
|
AssetStreamServer& operator=(const AssetStreamServer& other) = delete;
|
||||||
|
virtual ~AssetStreamServer() = default;
|
||||||
|
|
||||||
|
// Starts the asset stream server on the given |port|.
|
||||||
|
// Asserts that the server is not yet running.
|
||||||
|
virtual absl::Status Start(int port) = 0;
|
||||||
|
|
||||||
|
// Sets |manifest_id| to be distributed to gamelets.
|
||||||
|
// Thread-safe.
|
||||||
|
virtual void SetManifestId(const ContentIdProto& manifest_id) = 0;
|
||||||
|
|
||||||
|
// Waits until the FUSE for the given |instance| id has acknowledged the
|
||||||
|
// reception of the currently set manifest id. Returns a DeadlineExceeded
|
||||||
|
// error if the ack is not received within the given |timeout|.
|
||||||
|
// Thread-safe.
|
||||||
|
virtual absl::Status WaitForManifestAck(const std::string& instance,
|
||||||
|
absl::Duration timeout) = 0;
|
||||||
|
|
||||||
|
// Stops internal services and waits for the server to shut down.
|
||||||
|
virtual void Shutdown() = 0;
|
||||||
|
|
||||||
|
// Returns the used manifest id.
|
||||||
|
// Thread-safe.
|
||||||
|
virtual ContentIdProto GetManifestId() const = 0;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// Creates a new asset streaming server.
|
||||||
|
// |src_dir| is the directory on the workstation to mount.
|
||||||
|
// |data_store_reader| is responsible for loading content by ID.
|
||||||
|
// |file_chunks| is used for mapping data chunk ids to file locations.
|
||||||
|
AssetStreamServer(std::string src_dir, DataStoreReader* data_store_reader,
|
||||||
|
FileChunkMap* file_chunks);
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_ASSET_STREAM_SERVER_H_
56
asset_stream_manager/background_service_impl.cc
Normal file
@@ -0,0 +1,56 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/background_service_impl.h"

#include "common/grpc_status.h"
#include "common/log.h"
#include "common/util.h"
#include "grpcpp/grpcpp.h"

namespace cdc_ft {

BackgroundServiceImpl::BackgroundServiceImpl() {}

BackgroundServiceImpl::~BackgroundServiceImpl() = default;

void BackgroundServiceImpl::SetExitCallback(ExitCallback exit_callback) {
  exit_callback_ = std::move(exit_callback);
}

grpc::Status BackgroundServiceImpl::Exit(grpc::ServerContext* context,
                                         const ExitRequest* request,
                                         ExitResponse* response) {
  LOG_INFO("RPC:Exit");
  if (exit_callback_) {
    return ToGrpcStatus(exit_callback_());
  }
  return grpc::Status::OK;
}

grpc::Status BackgroundServiceImpl::GetPid(grpc::ServerContext* context,
                                           const GetPidRequest* request,
                                           GetPidResponse* response) {
  LOG_INFO("RPC:GetPid");
  response->set_pid(static_cast<int32_t>(Util::GetPid()));
  return grpc::Status::OK;
}

grpc::Status BackgroundServiceImpl::HealthCheck(grpc::ServerContext* context,
                                                const EmptyProto* request,
                                                EmptyProto* response) {
  return grpc::Status::OK;
}

}  // namespace cdc_ft
68
asset_stream_manager/background_service_impl.h
Normal file
@@ -0,0 +1,68 @@
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_BACKGROUND_SERVICE_IMPL_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_BACKGROUND_SERVICE_IMPL_H_
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "asset_stream_manager/background_service_impl.h"
|
||||||
|
#include "asset_stream_manager/session_management_server.h"
|
||||||
|
#include "grpcpp/grpcpp.h"
|
||||||
|
#include "proto/background_service.grpc.pb.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Implements a service to manage a background process as a server.
|
||||||
|
// The corresponding client is implemented by ProcessManager. The background
|
||||||
|
// process in this case is asset_stream_manager. ProcessManager starts the
|
||||||
|
// process on demand (e.g. when `ggp instance mount --local-dir` is invoked) and
|
||||||
|
// manages its lifetime: It calls GetPid() initially, HealthCheck() periodically
|
||||||
|
// to monitor the process, and Exit() on shutdown.
|
||||||
|
// This service is owned by SessionManagementServer.
|
||||||
|
class BackgroundServiceImpl final
|
||||||
|
: public backgroundservice::BackgroundService::Service {
|
||||||
|
public:
|
||||||
|
using ExitRequest = backgroundservice::ExitRequest;
|
||||||
|
using ExitResponse = backgroundservice::ExitResponse;
|
||||||
|
using GetPidRequest = backgroundservice::GetPidRequest;
|
||||||
|
using GetPidResponse = backgroundservice::GetPidResponse;
|
||||||
|
using EmptyProto = google::protobuf::Empty;
|
||||||
|
|
||||||
|
BackgroundServiceImpl();
|
||||||
|
~BackgroundServiceImpl();
|
||||||
|
|
||||||
|
// Exit callback gets called from the Exit() RPC.
|
||||||
|
using ExitCallback = std::function<absl::Status()>;
|
||||||
|
void SetExitCallback(ExitCallback exit_callback);
|
||||||
|
|
||||||
|
grpc::Status Exit(grpc::ServerContext* context, const ExitRequest* request,
|
||||||
|
ExitResponse* response) override;
|
||||||
|
|
||||||
|
grpc::Status GetPid(grpc::ServerContext* context,
|
||||||
|
const GetPidRequest* request,
|
||||||
|
GetPidResponse* response) override;
|
||||||
|
|
||||||
|
grpc::Status HealthCheck(grpc::ServerContext* context,
|
||||||
|
const EmptyProto* request,
|
||||||
|
EmptyProto* response) override;
|
||||||
|
|
||||||
|
private:
|
||||||
|
ExitCallback exit_callback_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_BACKGROUND_SERVICE_IMPL_H_
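Editor's note: the class comment above describes the lifetime protocol driven by an external process manager (GetPid once, HealthCheck periodically, Exit on shutdown). A hedged sketch of what such a client call could look like with the generated gRPC stub; the endpoint "localhost:44432" is a placeholder, not a port defined by this commit.

// Illustrative client sketch only; the endpoint below is an assumption.
#include "grpcpp/grpcpp.h"
#include "proto/background_service.grpc.pb.h"

bool PingBackgroundService() {
  auto channel = grpc::CreateChannel("localhost:44432",
                                     grpc::InsecureChannelCredentials());
  auto stub = backgroundservice::BackgroundService::NewStub(channel);

  // GetPid() is called once so the manager can track the process.
  grpc::ClientContext pid_ctx;
  backgroundservice::GetPidRequest pid_req;
  backgroundservice::GetPidResponse pid_res;
  if (!stub->GetPid(&pid_ctx, pid_req, &pid_res).ok()) return false;

  // HealthCheck() is polled periodically; an empty proto goes both ways.
  grpc::ClientContext hc_ctx;
  google::protobuf::Empty empty_req, empty_res;
  return stub->HealthCheck(&hc_ctx, empty_req, &empty_res).ok();
}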
225
asset_stream_manager/cdc_fuse_manager.cc
Normal file
@@ -0,0 +1,225 @@
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/cdc_fuse_manager.h"
|
||||||
|
|
||||||
|
#include "absl/strings/match.h"
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "cdc_fuse_fs/constants.h"
|
||||||
|
#include "common/gamelet_component.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
constexpr char kFuseFilename[] = "cdc_fuse_fs";
|
||||||
|
constexpr char kLibFuseFilename[] = "libfuse.so";
|
||||||
|
constexpr char kFuseStdoutPrefix[] = "cdc_fuse_fs_stdout";
|
||||||
|
constexpr char kRemoteToolsBinDir[] = "/opt/developer/tools/bin/";
|
||||||
|
|
||||||
|
// Mount point for FUSE on the gamelet.
|
||||||
|
constexpr char kMountDir[] = "/mnt/workstation";
|
||||||
|
|
||||||
|
// Cache directory on the gamelet to store data chunks.
|
||||||
|
constexpr char kCacheDir[] = "/var/cache/asset_streaming";
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
CdcFuseManager::CdcFuseManager(std::string instance,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
RemoteUtil* remote_util)
|
||||||
|
: instance_(std::move(instance)),
|
||||||
|
process_factory_(process_factory),
|
||||||
|
remote_util_(remote_util) {}
|
||||||
|
|
||||||
|
CdcFuseManager::~CdcFuseManager() = default;
|
||||||
|
|
||||||
|
absl::Status CdcFuseManager::Deploy() {
|
||||||
|
assert(!fuse_process_);
|
||||||
|
|
||||||
|
LOG_INFO("Deploying FUSE...");
|
||||||
|
|
||||||
|
std::string exe_dir;
|
||||||
|
RETURN_IF_ERROR(path::GetExeDir(&exe_dir), "Failed to get exe directory");
|
||||||
|
|
||||||
|
std::string local_exe_path = path::Join(exe_dir, kFuseFilename);
|
||||||
|
std::string local_lib_path = path::Join(exe_dir, kLibFuseFilename);
|
||||||
|
|
||||||
|
#ifdef _DEBUG
|
||||||
|
// Sync FUSE to the gamelet in debug. Debug builds are rather large, so
|
||||||
|
// there's a gain from using sync.
|
||||||
|
LOG_DEBUG("Syncing FUSE");
|
||||||
|
RETURN_IF_ERROR(
|
||||||
|
remote_util_->Sync({local_exe_path, local_lib_path}, kRemoteToolsBinDir),
|
||||||
|
"Failed to sync FUSE to gamelet");
|
||||||
|
LOG_DEBUG("Syncing FUSE succeeded");
|
||||||
|
#else
|
||||||
|
// Copy FUSE to the gamelet. This is usually faster in production since it
|
||||||
|
// doesn't have to deploy ggp__server first.
|
||||||
|
LOG_DEBUG("Copying FUSE");
|
||||||
|
RETURN_IF_ERROR(remote_util_->Scp({local_exe_path, local_lib_path},
|
||||||
|
kRemoteToolsBinDir, true),
|
||||||
|
"Failed to copy FUSE to gamelet");
|
||||||
|
LOG_DEBUG("Copying FUSE succeeded");
|
||||||
|
|
||||||
|
// Make FUSE executable. Note that sync does it automatically.
|
||||||
|
LOG_DEBUG("Making FUSE executable");
|
||||||
|
std::string remotePath = path::JoinUnix(kRemoteToolsBinDir, kFuseFilename);
|
||||||
|
RETURN_IF_ERROR(remote_util_->Chmod("a+x", remotePath),
|
||||||
|
"Failed to set executable flag on FUSE");
|
||||||
|
LOG_DEBUG("Making FUSE succeeded");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcFuseManager::Start(uint16_t local_port, uint16_t remote_port,
|
||||||
|
int verbosity, bool debug,
|
||||||
|
bool singlethreaded, bool enable_stats,
|
||||||
|
bool check, uint64_t cache_capacity,
|
||||||
|
uint32_t cleanup_timeout_sec,
|
||||||
|
uint32_t access_idle_timeout_sec) {
|
||||||
|
assert(!fuse_process_);
|
||||||
|
|
||||||
|
// Gather stats for the FUSE gamelet component to determine whether a
|
||||||
|
// re-deploy is necessary.
|
||||||
|
std::string exe_dir;
|
||||||
|
RETURN_IF_ERROR(path::GetExeDir(&exe_dir), "Failed to get exe directory");
|
||||||
|
std::vector<GameletComponent> components;
|
||||||
|
absl::Status status =
|
||||||
|
GameletComponent::Get({path::Join(exe_dir, kFuseFilename),
|
||||||
|
path::Join(exe_dir, kLibFuseFilename)},
|
||||||
|
&components);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return absl::NotFoundError(absl::StrFormat(
|
||||||
|
"Required gamelet component not found. Make sure the files %s and %s "
|
||||||
|
"reside in the same folder as stadia_assets_stream_manager_v3.exe.",
|
||||||
|
kFuseFilename, kLibFuseFilename));
|
||||||
|
}
|
||||||
|
std::string component_args = GameletComponent::ToCommandLineArgs(components);
|
||||||
|
|
||||||
|
// Build the remote command.
|
||||||
|
std::string remotePath = path::JoinUnix(kRemoteToolsBinDir, kFuseFilename);
|
||||||
|
std::string remote_command = absl::StrFormat(
|
||||||
|
"LD_LIBRARY_PATH=%s %s --instance='%s' "
|
||||||
|
"--components='%s' --port=%i --cache_dir=%s "
|
||||||
|
"--verbosity=%i --cleanup_timeout=%i --access_idle_timeout=%i --stats=%i "
|
||||||
|
"--check=%i --cache_capacity=%u -- -o allow_root -o ro -o nonempty -o "
|
||||||
|
"auto_unmount %s%s%s",
|
||||||
|
kRemoteToolsBinDir, remotePath, instance_, component_args, remote_port,
|
||||||
|
kCacheDir, verbosity, cleanup_timeout_sec, access_idle_timeout_sec,
|
||||||
|
enable_stats, check, cache_capacity, kMountDir, debug ? " -d" : "",
|
||||||
|
singlethreaded ? " -s" : "");
|
||||||
|
|
||||||
|
bool needs_deploy = false;
|
||||||
|
RETURN_IF_ERROR(
|
||||||
|
RunFuseProcess(local_port, remote_port, remote_command, &needs_deploy));
|
||||||
|
if (needs_deploy) {
|
||||||
|
// Deploy and try again.
|
||||||
|
RETURN_IF_ERROR(Deploy());
|
||||||
|
RETURN_IF_ERROR(
|
||||||
|
RunFuseProcess(local_port, remote_port, remote_command, &needs_deploy));
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcFuseManager::RunFuseProcess(uint16_t local_port,
|
||||||
|
uint16_t remote_port,
|
||||||
|
const std::string& remote_command,
|
||||||
|
bool* needs_deploy) {
|
||||||
|
assert(!fuse_process_);
|
||||||
|
assert(needs_deploy);
|
||||||
|
*needs_deploy = false;
|
||||||
|
|
||||||
|
LOG_DEBUG("Running FUSE process");
|
||||||
|
ProcessStartInfo start_info =
|
||||||
|
remote_util_->BuildProcessStartInfoForSshPortForwardAndCommand(
|
||||||
|
local_port, remote_port, true, remote_command);
|
||||||
|
start_info.name = kFuseFilename;
|
||||||
|
|
||||||
|
// Capture stdout to determine whether a deploy is required.
|
||||||
|
fuse_stdout_.clear();
|
||||||
|
fuse_startup_finished_ = false;
|
||||||
|
start_info.stdout_handler = [this, needs_deploy](const char* data,
|
||||||
|
size_t size) {
|
||||||
|
return HandleFuseStdout(data, size, needs_deploy);
|
||||||
|
};
|
||||||
|
fuse_process_ = process_factory_->Create(start_info);
|
||||||
|
RETURN_IF_ERROR(fuse_process_->Start(), "Failed to start FUSE process");
|
||||||
|
LOG_DEBUG("FUSE process started. Waiting for startup to finish.");
|
||||||
|
|
||||||
|
// Run until process exits or startup finishes.
|
||||||
|
auto startup_finished = [this]() { return fuse_startup_finished_.load(); };
|
||||||
|
RETURN_IF_ERROR(fuse_process_->RunUntil(startup_finished),
|
||||||
|
"Failed to run FUSE process");
|
||||||
|
LOG_DEBUG("FUSE process startup complete.");
|
||||||
|
|
||||||
|
  // If the FUSE process exited before it could perform its up-to-date check,
  // the binary most likely does not exist on the gamelet yet and needs to be
  // deployed.
|
||||||
|
*needs_deploy |= !fuse_startup_finished_ && fuse_process_->HasExited() &&
|
||||||
|
fuse_process_->ExitCode() != 0;
|
||||||
|
if (*needs_deploy) {
|
||||||
|
LOG_DEBUG("FUSE needs to be (re-)deployed.");
|
||||||
|
fuse_process_.reset();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcFuseManager::Stop() {
|
||||||
|
if (!fuse_process_) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_DEBUG("Terminating FUSE process");
|
||||||
|
absl::Status status = fuse_process_->Terminate();
|
||||||
|
fuse_process_.reset();
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool CdcFuseManager::IsHealthy() const {
|
||||||
|
return fuse_process_ && !fuse_process_->HasExited();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcFuseManager::HandleFuseStdout(const char* data, size_t size,
|
||||||
|
bool* needs_deploy) {
|
||||||
|
assert(needs_deploy);
|
||||||
|
|
||||||
|
// Don't capture stdout beyond startup.
|
||||||
|
if (!fuse_startup_finished_) {
|
||||||
|
fuse_stdout_.append(data, size);
|
||||||
|
// The gamelet component prints some magic strings to stdout to indicate
|
||||||
|
// whether it's up-to-date.
|
||||||
|
if (absl::StrContains(fuse_stdout_, kFuseUpToDate)) {
|
||||||
|
fuse_startup_finished_ = true;
|
||||||
|
} else if (absl::StrContains(fuse_stdout_, kFuseNotUpToDate)) {
|
||||||
|
fuse_startup_finished_ = true;
|
||||||
|
*needs_deploy = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!remote_util_->Quiet()) {
|
||||||
|
// Forward to logging.
|
||||||
|
return LogOutput(kFuseStdoutPrefix, data, size);
|
||||||
|
}
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
98
asset_stream_manager/cdc_fuse_manager.h
Normal file
@@ -0,0 +1,98 @@
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_CDC_FUSE_MANAGER_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_CDC_FUSE_MANAGER_H_
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "common/remote_util.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class Process;
|
||||||
|
class ProcessFactory;
|
||||||
|
class RemoteUtil;
|
||||||
|
|
||||||
|
// Manages the gamelet-side CDC FUSE filesystem process.
|
||||||
|
class CdcFuseManager {
|
||||||
|
public:
|
||||||
|
CdcFuseManager(std::string instance, ProcessFactory* process_factory,
|
||||||
|
RemoteUtil* remote_util);
|
||||||
|
~CdcFuseManager();
|
||||||
|
|
||||||
|
CdcFuseManager(CdcFuseManager&) = delete;
|
||||||
|
CdcFuseManager& operator=(CdcFuseManager&) = delete;
|
||||||
|
|
||||||
|
// Starts the CDC FUSE and establishes a reverse SSH tunnel from the gamelet's
|
||||||
|
// |remote_port| to the workstation's |local_port|. Deploys the binary if
|
||||||
|
// necessary.
|
||||||
|
//
|
||||||
|
// |verbosity| is the log verbosity used by the filesystem.
|
||||||
|
// |debug| puts the filesystem into debug mode if set to true. This also
|
||||||
|
// causes the process to run in the foreground, so that logs are piped through
|
||||||
|
// SSH to stdout of the workstation process.
|
||||||
|
// |singlethreaded| puts the filesystem into single-threaded mode if true.
|
||||||
|
// |enable_stats| determines whether FUSE should send debug statistics.
|
||||||
|
// |check| determines whether to execute FUSE consistency check.
|
||||||
|
// |cache_capacity| defines the cache capacity in bytes.
|
||||||
|
// |cleanup_timeout_sec| defines the data provider cleanup timeout in seconds.
|
||||||
|
// |access_idle_timeout_sec| defines the number of seconds after which data
|
||||||
|
// provider is considered to be access-idling.
|
||||||
|
absl::Status Start(uint16_t local_port, uint16_t remote_port, int verbosity,
|
||||||
|
bool debug, bool singlethreaded, bool enable_stats,
|
||||||
|
bool check, uint64_t cache_capacity,
|
||||||
|
uint32_t cleanup_timeout_sec,
|
||||||
|
uint32_t access_idle_timeout_sec);
|
||||||
|
|
||||||
|
// Stops the CDC FUSE.
|
||||||
|
absl::Status Stop();
|
||||||
|
|
||||||
|
// Returns true if the FUSE process is running.
|
||||||
|
bool IsHealthy() const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Runs the FUSE process on the gamelet from the given |remote_command| and
|
||||||
|
// establishes a reverse SSH tunnel from the gamelet's |remote_port| to the
|
||||||
|
// workstation's |local_port|.
|
||||||
|
//
|
||||||
|
// If the FUSE is not up-to-date or does not exist, sets |needs_deploy| to
|
||||||
|
// true and returns OK. In that case, Deploy() needs to be called and the FUSE
|
||||||
|
// process should be run again.
|
||||||
|
absl::Status RunFuseProcess(uint16_t local_port, uint16_t remote_port,
|
||||||
|
const std::string& remote_command,
|
||||||
|
bool* needs_deploy);
|
||||||
|
|
||||||
|
// Deploys the gamelet components.
|
||||||
|
absl::Status Deploy();
|
||||||
|
|
||||||
|
// Output handler for FUSE's stdout. Sets |needs_deploy| to true if the output
|
||||||
|
// contains a magic marker to indicate that the binary has to be redeployed.
|
||||||
|
// Called in a background thread.
|
||||||
|
absl::Status HandleFuseStdout(const char* data, size_t size,
|
||||||
|
bool* needs_deploy);
|
||||||
|
|
||||||
|
std::string instance_;
|
||||||
|
ProcessFactory* const process_factory_;
|
||||||
|
RemoteUtil* const remote_util_;
|
||||||
|
|
||||||
|
std::unique_ptr<Process> fuse_process_;
|
||||||
|
std::string fuse_stdout_;
|
||||||
|
std::atomic<bool> fuse_startup_finished_{false};
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_CDC_FUSE_MANAGER_H_
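Editor's note: a hedged sketch of the call sequence a session might use for the manager declared above. The construction of ProcessFactory/RemoteUtil is elided and every numeric value is a placeholder; the real wiring lives in the session code added by this commit.

// Illustrative sketch only; collaborator setup is intentionally elided.
#include <cstdint>

#include "absl/status/status.h"
#include "asset_stream_manager/cdc_fuse_manager.h"

absl::Status RunFuse(cdc_ft::ProcessFactory* process_factory,
                     cdc_ft::RemoteUtil* remote_util) {
  cdc_ft::CdcFuseManager fuse("my-instance-id", process_factory, remote_util);

  // Start() deploys cdc_fuse_fs/libfuse.so if needed and sets up the reverse
  // SSH tunnel; all argument values below are placeholders.
  absl::Status status = fuse.Start(
      /*local_port=*/44433, /*remote_port=*/44433, /*verbosity=*/2,
      /*debug=*/false, /*singlethreaded=*/false, /*enable_stats=*/false,
      /*check=*/false, /*cache_capacity=*/uint64_t{150} << 30,
      /*cleanup_timeout_sec=*/300, /*access_idle_timeout_sec=*/5);
  if (!status.ok()) return status;

  // ... stream assets while fuse.IsHealthy() stays true ...

  return fuse.Stop();
}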
305
asset_stream_manager/grpc_asset_stream_server.cc
Normal file
@@ -0,0 +1,305 @@
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/grpc_asset_stream_server.h"
|
||||||
|
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "absl/time/time.h"
|
||||||
|
#include "common/grpc_status.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "data_store/data_store_reader.h"
|
||||||
|
#include "grpcpp/grpcpp.h"
|
||||||
|
#include "manifest/file_chunk_map.h"
|
||||||
|
#include "proto/asset_stream_service.grpc.pb.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
using GetContentRequest = proto::GetContentRequest;
|
||||||
|
using GetContentResponse = proto::GetContentResponse;
|
||||||
|
using SendCachedContentIdsRequest = proto::SendCachedContentIdsRequest;
|
||||||
|
using SendCachedContentIdsResponse = proto::SendCachedContentIdsResponse;
|
||||||
|
using AssetStreamService = proto::AssetStreamService;
|
||||||
|
|
||||||
|
using GetManifestIdRequest = proto::GetManifestIdRequest;
|
||||||
|
using GetManifestIdResponse = proto::GetManifestIdResponse;
|
||||||
|
using AckManifestIdReceivedRequest = proto::AckManifestIdReceivedRequest;
|
||||||
|
using AckManifestIdReceivedResponse = proto::AckManifestIdReceivedResponse;
|
||||||
|
using ConfigStreamService = proto::ConfigStreamService;
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
class AssetStreamServiceImpl final : public AssetStreamService::Service {
|
||||||
|
public:
|
||||||
|
AssetStreamServiceImpl(std::string src_dir,
|
||||||
|
DataStoreReader* data_store_reader,
|
||||||
|
FileChunkMap* file_chunks, InstanceIdMap* instance_ids,
|
||||||
|
ContentSentHandler content_sent)
|
||||||
|
: src_dir_(std::move(src_dir)),
|
||||||
|
data_store_reader_(data_store_reader),
|
||||||
|
file_chunks_(file_chunks),
|
||||||
|
started_(absl::Now()),
|
||||||
|
instance_ids_(instance_ids),
|
||||||
|
content_sent_(content_sent) {}
|
||||||
|
|
||||||
|
grpc::Status GetContent(grpc::ServerContext* context,
|
||||||
|
const GetContentRequest* request,
|
||||||
|
GetContentResponse* response) override {
|
||||||
|
// See if this is a data chunk first. The hash lookup is faster than the
|
||||||
|
// file lookup from the data store.
|
||||||
|
std::string rel_path;
|
||||||
|
uint64_t offset;
|
||||||
|
size_t size;
|
||||||
|
for (const ContentIdProto& id : request->id()) {
|
||||||
|
uint32_t uint32_size;
|
||||||
|
if (file_chunks_->Lookup(id, &rel_path, &offset, &uint32_size)) {
|
||||||
|
size = uint32_size;
|
||||||
|
// File data chunk.
|
||||||
|
RETURN_GRPC_IF_ERROR(ReadFromFile(id, rel_path, offset, uint32_size,
|
||||||
|
response->add_data()));
|
||||||
|
file_chunks_->RecordStreamedChunk(id, request->thread_id());
|
||||||
|
} else {
|
||||||
|
// Manifest chunk.
|
||||||
|
RETURN_GRPC_IF_ERROR(
|
||||||
|
ReadFromDataStore(id, response->add_data(), &size));
|
||||||
|
}
|
||||||
|
std::string instance_id = instance_ids_->Get(context->peer());
|
||||||
|
if (content_sent_ != nullptr) {
|
||||||
|
content_sent_(size, 1, instance_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return grpc::Status::OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
grpc::Status SendCachedContentIds(
|
||||||
|
grpc::ServerContext* context, const SendCachedContentIdsRequest* request,
|
||||||
|
SendCachedContentIdsResponse* response) override {
|
||||||
|
for (const ContentIdProto& id : request->id())
|
||||||
|
file_chunks_->RecordCachedChunk(id);
|
||||||
|
return grpc::Status::OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
absl::Status ReadFromFile(const ContentIdProto& id,
|
||||||
|
const std::string& rel_path, uint64_t offset,
|
||||||
|
uint32_t size, std::string* data) {
|
||||||
|
std::string path = path::Join(src_dir_, rel_path);
|
||||||
|
path::FixPathSeparators(&path);
|
||||||
|
data->resize(size);
|
||||||
|
size_t read_size;
|
||||||
|
ASSIGN_OR_RETURN(
|
||||||
|
read_size,
|
||||||
|
path::ReadFile(path, const_cast<char*>(data->data()), offset, size),
|
||||||
|
"Failed to read chunk '%s', file '%s', offset %d, size %d",
|
||||||
|
ContentId::ToHexString(id), path, offset, size);
|
||||||
|
|
||||||
|
absl::Time now = absl::Now();
|
||||||
|
LOG_VERBOSE("'%s', %d, '%s', '%s', %u, %u",
|
||||||
|
absl::FormatTime("%H:%M:%S", now, absl::UTCTimeZone()),
|
||||||
|
absl::ToInt64Milliseconds(now - started_),
|
||||||
|
ContentId::ToHexString(id), path, offset, size);
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status ReadFromDataStore(const ContentIdProto& id, std::string* data,
|
||||||
|
size_t* size) {
|
||||||
|
Buffer buf;
|
||||||
|
RETURN_IF_ERROR(data_store_reader_->Get(id, &buf),
|
||||||
|
"Failed to read chunk '%s'", ContentId::ToHexString(id));
|
||||||
|
|
||||||
|
// TODO: Get rid of copy after the Buffer uses std::string.
|
||||||
|
*data = std::string(buf.data(), buf.size());
|
||||||
|
*size = buf.size();
|
||||||
|
absl::Time now = absl::Now();
|
||||||
|
LOG_VERBOSE("'%s', %d, '%s', %d",
|
||||||
|
absl::FormatTime("%H:%M:%S", now, absl::UTCTimeZone()),
|
||||||
|
absl::ToInt64Milliseconds(now - started_),
|
||||||
|
ContentId::ToHexString(id), buf.size());
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
const std::string src_dir_;
|
||||||
|
DataStoreReader* const data_store_reader_;
|
||||||
|
FileChunkMap* const file_chunks_;
|
||||||
|
const absl::Time started_;
|
||||||
|
InstanceIdMap* instance_ids_;
|
||||||
|
ContentSentHandler content_sent_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class ConfigStreamServiceImpl final : public ConfigStreamService::Service {
|
||||||
|
public:
|
||||||
|
explicit ConfigStreamServiceImpl(InstanceIdMap* instance_ids)
|
||||||
|
: instance_ids_(instance_ids) {}
|
||||||
|
~ConfigStreamServiceImpl() { Shutdown(); }
|
||||||
|
|
||||||
|
grpc::Status GetManifestId(
|
||||||
|
grpc::ServerContext* context, const GetManifestIdRequest* request,
|
||||||
|
::grpc::ServerWriter<GetManifestIdResponse>* stream) override {
|
||||||
|
ContentIdProto local_id;
|
||||||
|
bool running = true;
|
||||||
|
do {
|
||||||
|
// Shutdown happened.
|
||||||
|
if (!WaitForUpdate(local_id)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
LOG_INFO("Sending updated manifest id '%s' to the gamelet",
|
||||||
|
ContentId::ToHexString(local_id));
|
||||||
|
GetManifestIdResponse response;
|
||||||
|
*response.mutable_id() = local_id;
|
||||||
|
bool success = stream->Write(response);
|
||||||
|
if (!success) {
|
||||||
|
LOG_WARNING("Failed to send updated manifest id '%s'",
|
||||||
|
ContentId::ToHexString(local_id));
|
||||||
|
}
|
||||||
|
absl::ReaderMutexLock lock(&mutex_);
|
||||||
|
running = running_;
|
||||||
|
} while (running);
|
||||||
|
return grpc::Status::OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
grpc::Status AckManifestIdReceived(
|
||||||
|
grpc::ServerContext* context, const AckManifestIdReceivedRequest* request,
|
||||||
|
AckManifestIdReceivedResponse* response) override {
|
||||||
|
// Associate the peer with the gamelet ID.
|
||||||
|
instance_ids_->Set(context->peer(), request->gamelet_id());
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
acked_manifest_ids_[request->gamelet_id()] = request->manifest_id();
|
||||||
|
return grpc::Status::OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetManifestId(const ContentIdProto& id) ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
LOG_INFO("Updating manifest id '%s' in configuration service",
|
||||||
|
ContentId::ToHexString(id));
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
id_ = id;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status WaitForManifestAck(const std::string& instance,
|
||||||
|
absl::Duration timeout) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
auto cond = [this, &instance]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
|
||||||
|
AckedManifestIdsMap::iterator iter = acked_manifest_ids_.find(instance);
|
||||||
|
return iter != acked_manifest_ids_.end() && id_ == iter->second;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!mutex_.AwaitWithTimeout(absl::Condition(&cond), timeout)) {
|
||||||
|
return absl::DeadlineExceededError(absl::StrFormat(
|
||||||
|
"Instance '%s' did not acknowledge reception of manifest", instance));
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
void Shutdown() ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
if (running_) {
|
||||||
|
LOG_INFO("Shutting down configuration service");
|
||||||
|
running_ = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ContentIdProto GetStoredManifestId() const ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
return id_;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Returns false if the update process was cancelled.
|
||||||
|
bool WaitForUpdate(ContentIdProto& local_id) ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
auto cond = [&]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
|
||||||
|
return !running_ || local_id != id_;
|
||||||
|
};
|
||||||
|
mutex_.Await(absl::Condition(&cond));
|
||||||
|
local_id = id_;
|
||||||
|
return running_;
|
||||||
|
}
|
||||||
|
|
||||||
|
mutable absl::Mutex mutex_;
|
||||||
|
ContentIdProto id_ ABSL_GUARDED_BY(mutex_);
|
||||||
|
bool running_ ABSL_GUARDED_BY(mutex_) = true;
|
||||||
|
InstanceIdMap* instance_ids_;
|
||||||
|
|
||||||
|
// Maps instance ids to the last acknowledged manifest id.
|
||||||
|
using AckedManifestIdsMap = std::unordered_map<std::string, ContentIdProto>;
|
||||||
|
AckedManifestIdsMap acked_manifest_ids_ ABSL_GUARDED_BY(mutex_);
|
||||||
|
};
|
||||||
|
|
||||||
|
GrpcAssetStreamServer::GrpcAssetStreamServer(std::string src_dir,
|
||||||
|
DataStoreReader* data_store_reader,
|
||||||
|
FileChunkMap* file_chunks,
|
||||||
|
ContentSentHandler content_sent)
|
||||||
|
: AssetStreamServer(src_dir, data_store_reader, file_chunks),
|
||||||
|
asset_stream_service_(std::make_unique<AssetStreamServiceImpl>(
|
||||||
|
std::move(src_dir), data_store_reader, file_chunks, &instance_ids_,
|
||||||
|
content_sent)),
|
||||||
|
config_stream_service_(
|
||||||
|
std::make_unique<ConfigStreamServiceImpl>(&instance_ids_)) {}
|
||||||
|
|
||||||
|
GrpcAssetStreamServer::~GrpcAssetStreamServer() = default;
|
||||||
|
|
||||||
|
absl::Status GrpcAssetStreamServer::Start(int port) {
|
||||||
|
assert(!server_);
|
||||||
|
|
||||||
|
std::string server_address = absl::StrFormat("localhost:%i", port);
|
||||||
|
grpc::ServerBuilder builder;
|
||||||
|
int selected_port = 0;
|
||||||
|
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials(),
|
||||||
|
&selected_port);
|
||||||
|
builder.RegisterService(asset_stream_service_.get());
|
||||||
|
builder.RegisterService(config_stream_service_.get());
|
||||||
|
server_ = builder.BuildAndStart();
|
||||||
|
if (selected_port != port) {
|
||||||
|
return MakeStatus(
|
||||||
|
"Failed to start streaming server: Could not listen on port %i. Is the "
|
||||||
|
"port in use?",
|
||||||
|
port);
|
||||||
|
}
|
||||||
|
if (!server_) return MakeStatus("Failed to start streaming server");
|
||||||
|
LOG_INFO("Streaming server listening on '%s'", server_address);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
void GrpcAssetStreamServer::SetManifestId(const ContentIdProto& manifest_id) {
|
||||||
|
LOG_INFO("Setting manifest id '%s'", ContentId::ToHexString(manifest_id));
|
||||||
|
assert(config_stream_service_);
|
||||||
|
config_stream_service_->SetManifestId(manifest_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GrpcAssetStreamServer::WaitForManifestAck(
|
||||||
|
const std::string& instance, absl::Duration timeout) {
|
||||||
|
assert(config_stream_service_);
|
||||||
|
return config_stream_service_->WaitForManifestAck(instance, timeout);
|
||||||
|
}
|
||||||
|
|
||||||
|
void GrpcAssetStreamServer::Shutdown() {
|
||||||
|
assert(config_stream_service_);
|
||||||
|
config_stream_service_->Shutdown();
|
||||||
|
if (server_) {
|
||||||
|
server_->Shutdown();
|
||||||
|
server_->Wait();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ContentIdProto GrpcAssetStreamServer::GetManifestId() const {
|
||||||
|
assert(config_stream_service_);
|
||||||
|
return config_stream_service_->GetStoredManifestId();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
69
asset_stream_manager/grpc_asset_stream_server.h
Normal file
@@ -0,0 +1,69 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ASSET_STREAM_MANAGER_GRPC_ASSET_STREAM_SERVER_H_
#define ASSET_STREAM_MANAGER_GRPC_ASSET_STREAM_SERVER_H_

#include <memory>
#include <string>

#include "asset_stream_manager/asset_stream_server.h"
#include "common/thread_safe_map.h"

namespace grpc {
class Server;
}

namespace cdc_ft {

using InstanceIdMap = ThreadSafeMap<std::string, std::string>;

class AssetStreamServiceImpl;
class ConfigStreamServiceImpl;

// gRPC server for streaming assets to one or more gamelets.
class GrpcAssetStreamServer : public AssetStreamServer {
 public:
  // Creates a new asset streaming gRPC server.
  GrpcAssetStreamServer(std::string src_dir, DataStoreReader* data_store_reader,
                        FileChunkMap* file_chunks,
                        ContentSentHandler content_sent);

  ~GrpcAssetStreamServer();

  // AssetStreamServer:

  absl::Status Start(int port) override;

  void SetManifestId(const ContentIdProto& manifest_id) override;

  absl::Status WaitForManifestAck(const std::string& instance,
                                  absl::Duration timeout) override;

  void Shutdown() override;

  ContentIdProto GetManifestId() const override;

 private:
  InstanceIdMap instance_ids_;
  const std::unique_ptr<AssetStreamServiceImpl> asset_stream_service_;
  const std::unique_ptr<ConfigStreamServiceImpl> config_stream_service_;
  std::unique_ptr<grpc::Server> server_;
};

}  // namespace cdc_ft

#endif  // ASSET_STREAM_MANAGER_GRPC_ASSET_STREAM_SERVER_H_
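Editor's note: a hedged lifecycle sketch for the server declared above, using only the public methods shown. The DataStoreReader, FileChunkMap, manifest id and instance id are placeholders; the ContentSentHandler signature is inferred from the call content_sent_(size, 1, instance_id) in the .cc file, and the port matches the asset streaming port range used elsewhere in this commit.

  cdc_ft::GrpcAssetStreamServer server(
      src_dir, &data_store_reader, &file_chunks,
      [](size_t bytes, size_t chunks, const std::string& instance) {
        // ContentSentHandler: e.g. update per-instance bandwidth statistics.
      });
  absl::Status status = server.Start(44433);
  if (status.ok()) {
    server.SetManifestId(manifest_id);
    status = server.WaitForManifestAck(instance_id, absl::Seconds(30));
    server.Shutdown();
  }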
259
asset_stream_manager/local_assets_stream_manager_service_impl.cc
Normal file
@@ -0,0 +1,259 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/local_assets_stream_manager_service_impl.h"

#include <iomanip>

#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "asset_stream_manager/multi_session.h"
#include "asset_stream_manager/session_manager.h"
#include "common/grpc_status.h"
#include "common/log.h"
#include "common/path.h"
#include "common/process.h"
#include "common/sdk_util.h"
#include "common/status.h"
#include "manifest/manifest_updater.h"

namespace cdc_ft {
namespace {

// Parses |instance_name| of the form
// "organizations/{org-id}/projects/{proj-id}/pools/{pool-id}/gamelets/{gamelet-id}"
// into parts. The pool id is not returned.
bool ParseInstanceName(const std::string& instance_name,
                       std::string* instance_id, std::string* project_id,
                       std::string* organization_id) {
  std::string pool_id;
  std::vector<std::string> parts = absl::StrSplit(instance_name, '/');
  if (parts.size() != 10) return false;
  if (parts[0] != "organizations" || parts[1].empty()) return false;
  if (parts[2] != "projects" || parts[3].empty()) return false;
  if (parts[4] != "pools" || parts[5].empty()) return false;
  // Instance id is e.g.
  // edge/e-europe-west3-b/49d010c7be1845ac9a19a9033c64a460ces1
  if (parts[6] != "gamelets" || parts[7].empty() || parts[8].empty() ||
      parts[9].empty())
    return false;
  *organization_id = parts[1];
  *project_id = parts[3];
  *instance_id = absl::StrFormat("%s/%s/%s", parts[7], parts[8], parts[9]);
  return true;
}
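// Editor's illustration (hypothetical ids): the name
// "organizations/my-org/projects/my-proj/pools/pool-1/gamelets/edge/e-europe-west3-b/49d010c7be1845ac9a19a9033c64a460ces1"
// has exactly 10 '/'-separated parts, so ParseInstanceName() succeeds with
// organization_id="my-org", project_id="my-proj" and
// instance_id="edge/e-europe-west3-b/49d010c7be1845ac9a19a9033c64a460ces1".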

// Parses |data| line by line for "|key|: value" and puts the first instance in
// |value| if present. Returns false if |data| does not contain "|key|: value".
// Trims whitespace.
bool ParseValue(const std::string& data, const std::string& key,
                std::string* value) {
  std::istringstream stream(data);

  std::string line;
  while (std::getline(stream, line)) {
    if (line.find(key + ":") == 0) {
      // Trim value.
      size_t start_pos = key.size() + 1;
      while (start_pos < line.size() && isspace(line[start_pos])) {
        start_pos++;
      }
      size_t end_pos = line.size();
      while (end_pos > start_pos && isspace(line[end_pos - 1])) {
        end_pos--;
      }
      *value = line.substr(start_pos, end_pos - start_pos);
      return true;
    }
  }
  return false;
}

// Returns |s| wrapped in double quotes with embedded quotes escaped, so it can
// be passed safely on a command line.
std::string Quoted(const std::string& s) {
  std::ostringstream ss;
  ss << std::quoted(s);
  return ss.str();
}

}  // namespace

LocalAssetsStreamManagerServiceImpl::LocalAssetsStreamManagerServiceImpl(
    SessionManager* session_manager, ProcessFactory* process_factory,
    metrics::MetricsService* metrics_service)
    : session_manager_(session_manager),
      process_factory_(process_factory),
      metrics_service_(metrics_service) {}

LocalAssetsStreamManagerServiceImpl::~LocalAssetsStreamManagerServiceImpl() =
    default;

grpc::Status LocalAssetsStreamManagerServiceImpl::StartSession(
    grpc::ServerContext* /*context*/, const StartSessionRequest* request,
    StartSessionResponse* /*response*/) {
  LOG_INFO("RPC:StartSession(gamelet_name='%s', workstation_directory='%s')",
           request->gamelet_name(), request->workstation_directory());

  metrics::DeveloperLogEvent evt;
  evt.as_manager_data = std::make_unique<metrics::AssetStreamingManagerData>();
  evt.as_manager_data->session_start_data =
      std::make_unique<metrics::SessionStartData>();
  evt.as_manager_data->session_start_data->absl_status = absl::StatusCode::kOk;
  evt.as_manager_data->session_start_data->status =
      metrics::SessionStartStatus::kOk;
  evt.as_manager_data->session_start_data->origin =
      ConvertOrigin(request->origin());

  // Parse instance/project/org id.
  absl::Status status;
  MultiSession* ms = nullptr;
  std::string instance_id, project_id, organization_id, instance_ip;
  uint16_t instance_port = 0;
  if (!ParseInstanceName(request->gamelet_name(), &instance_id, &project_id,
                         &organization_id)) {
    status = absl::InvalidArgumentError(absl::StrFormat(
        "Failed to parse instance name '%s'", request->gamelet_name()));
  } else {
    evt.project_id = project_id;
    evt.organization_id = organization_id;

    status = InitSsh(instance_id, project_id, organization_id, &instance_ip,
                     &instance_port);

    if (status.ok()) {
      status = session_manager_->StartSession(
          instance_id, project_id, organization_id, instance_ip, instance_port,
          request->workstation_directory(), &ms,
          &evt.as_manager_data->session_start_data->status);
    }
  }

  evt.as_manager_data->session_start_data->absl_status = status.code();
  if (ms) {
    evt.as_manager_data->session_start_data->concurrent_session_count =
        ms->GetSessionCount();
    if (!instance_id.empty() && ms->HasSessionForInstance(instance_id)) {
      ms->RecordSessionEvent(std::move(evt), metrics::EventType::kSessionStart,
                             instance_id);
    } else {
      ms->RecordMultiSessionEvent(std::move(evt),
                                  metrics::EventType::kSessionStart);
    }
  } else {
    metrics_service_->RecordEvent(std::move(evt),
                                  metrics::EventType::kSessionStart);
  }

  if (status.ok()) {
    LOG_INFO("StartSession() succeeded");
  } else {
    LOG_ERROR("StartSession() failed: %s", status.ToString());
  }
  return ToGrpcStatus(status);
}

grpc::Status LocalAssetsStreamManagerServiceImpl::StopSession(
    grpc::ServerContext* /*context*/, const StopSessionRequest* request,
    StopSessionResponse* /*response*/) {
  LOG_INFO("RPC:StopSession(gamelet_id='%s')", request->gamelet_id());

  absl::Status status = session_manager_->StopSession(request->gamelet_id());
  if (status.ok()) {
    LOG_INFO("StopSession() succeeded");
  } else {
    LOG_ERROR("StopSession() failed: %s", status.ToString());
  }
  return ToGrpcStatus(status);
}

metrics::RequestOrigin LocalAssetsStreamManagerServiceImpl::ConvertOrigin(
    StartSessionRequestOrigin origin) const {
  switch (origin) {
    case StartSessionRequest::ORIGIN_UNKNOWN:
      return metrics::RequestOrigin::kUnknown;
    case StartSessionRequest::ORIGIN_CLI:
      return metrics::RequestOrigin::kCli;
    case StartSessionRequest::ORIGIN_PARTNER_PORTAL:
      return metrics::RequestOrigin::kPartnerPortal;
    default:
      return metrics::RequestOrigin::kUnknown;
  }
}

absl::Status LocalAssetsStreamManagerServiceImpl::InitSsh(
    const std::string& instance_id, const std::string& project_id,
    const std::string& organization_id, std::string* instance_ip,
    uint16_t* instance_port) {
  SdkUtil sdk_util;
  instance_ip->clear();
  *instance_port = 0;

  ProcessStartInfo start_info;
  start_info.command = absl::StrFormat(
      "%s ssh init", path::Join(sdk_util.GetDevBinPath(), "ggp"));
  start_info.command += absl::StrFormat(" --instance %s", Quoted(instance_id));
  if (!project_id.empty()) {
    start_info.command += absl::StrFormat(" --project %s", Quoted(project_id));
  }
  if (!organization_id.empty()) {
    start_info.command +=
        absl::StrFormat(" --organization %s", Quoted(organization_id));
  }
  start_info.name = "ggp ssh init";

  std::string output;
  start_info.stdout_handler = [&output, this](const char* data,
                                              size_t data_size) {
    // Note: This is called from a background thread!
    output.append(data, data_size);
    return absl::OkStatus();
  };
  start_info.forward_output_to_log = true;

  std::unique_ptr<Process> process = process_factory_->Create(start_info);
  absl::Status status = process->Start();
  if (!status.ok()) {
    return WrapStatus(status, "Failed to start ggp process");
  }

  status = process->RunUntilExit();
  if (!status.ok()) {
    return WrapStatus(status, "Failed to run ggp process");
  }

  uint32_t exit_code = process->ExitCode();
  if (exit_code != 0) {
    return MakeStatus("ggp process exited with code %u", exit_code);
  }

  // Parse gamelet IP. Should be "Host: <instance ip>".
  if (!ParseValue(output, "Host", instance_ip)) {
    return MakeStatus("Failed to parse host from ggp ssh init response\n%s",
                      output);
  }

  // Parse ssh port. Should be "Port: <port>".
  std::string port_string;
  const bool result = ParseValue(output, "Port", &port_string);
  int int_port = atoi(port_string.c_str());
  // The port must fit into a uint16_t.
  if (!result || int_port <= 0 || int_port > 65535) {
    return MakeStatus("Failed to parse ssh port from ggp ssh init response\n%s",
                      output);
  }

  *instance_port = static_cast<uint16_t>(int_port);
  return absl::OkStatus();
}

}  // namespace cdc_ft
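Editor's note: InitSsh() above only requires the `ggp ssh init` output to contain "Host:" and "Port:" lines; the exact output format is not part of this commit. A hypothetical output such as

  Host: 203.0.113.10
  Port: 44322

would yield instance_ip="203.0.113.10" and instance_port=44322, with surrounding whitespace trimmed by ParseValue().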
@@ -0,0 +1,90 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ASSET_STREAM_MANAGER_LOCAL_ASSETS_STREAM_MANAGER_SERVICE_IMPL_H_
#define ASSET_STREAM_MANAGER_LOCAL_ASSETS_STREAM_MANAGER_SERVICE_IMPL_H_

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "asset_stream_manager/session_config.h"
#include "metrics/metrics.h"
#include "proto/local_assets_stream_manager.grpc.pb.h"

namespace cdc_ft {

class MultiSession;
class ProcessFactory;
class SessionManager;

// Implements a service to start and stop streaming sessions as a server.
// The corresponding clients are implemented by the ggp CLI and SDK Proxy.
// The CLI triggers StartSession() from `ggp instance mount --local-dir` and
// StopSession() from `ggp instance unmount`. SDK Proxy invokes StartSession()
// when a user starts a new game from the partner portal and sets an `Asset
// streaming directory` in the `Advanced settings` in the `Play settings`
// dialog.
// This service is owned by SessionManagementServer.
class LocalAssetsStreamManagerServiceImpl final
    : public localassetsstreammanager::LocalAssetsStreamManager::Service {
 public:
  using StartSessionRequest = localassetsstreammanager::StartSessionRequest;
  using StartSessionRequestOrigin =
      localassetsstreammanager::StartSessionRequest_Origin;
  using StartSessionResponse = localassetsstreammanager::StartSessionResponse;
  using StopSessionRequest = localassetsstreammanager::StopSessionRequest;
  using StopSessionResponse = localassetsstreammanager::StopSessionResponse;

  LocalAssetsStreamManagerServiceImpl(
      SessionManager* session_manager, ProcessFactory* process_factory,
      metrics::MetricsService* const metrics_service);
  ~LocalAssetsStreamManagerServiceImpl();

  // Starts a streaming session from path |request->workstation_directory()| to
  // the instance with id |request->gamelet_id()|. Stops an existing session
  // if it exists.
  grpc::Status StartSession(grpc::ServerContext* context,
                            const StartSessionRequest* request,
                            StartSessionResponse* response) override
      ABSL_LOCKS_EXCLUDED(sessions_mutex_);

  // Stops the streaming session to the instance with id
  // |request->gamelet_id()|. Returns a NotFound error if no session exists.
  grpc::Status StopSession(grpc::ServerContext* context,
                           const StopSessionRequest* request,
                           StopSessionResponse* response) override
      ABSL_LOCKS_EXCLUDED(sessions_mutex_);

 private:
  // Convert StartSessionRequest enum to metrics enum.
  metrics::RequestOrigin ConvertOrigin(StartSessionRequestOrigin origin) const;

  // Initializes an ssh connection to a gamelet by calling 'ggp ssh init'.
  // |instance_id| must be set, |project_id|, |organization_id| are optional.
  // Returns |instance_ip| and |instance_port| (SSH port).
  absl::Status InitSsh(const std::string& instance_id,
                       const std::string& project_id,
                       const std::string& organization_id,
                       std::string* instance_ip, uint16_t* instance_port);

  const SessionConfig cfg_;
  SessionManager* const session_manager_;
  ProcessFactory* const process_factory_;
  metrics::MetricsService* const metrics_service_;
};

}  // namespace cdc_ft

#endif  // ASSET_STREAM_MANAGER_LOCAL_ASSETS_STREAM_MANAGER_SERVICE_IMPL_H_
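Editor's note: a hedged sketch of how a client (such as the ggp CLI mentioned above) might call this service. The port 44432 and the proto field names are taken from other files in this commit; the channel setup and values are illustrative and rely on standard gRPC/protobuf codegen (NewStub, set_* accessors).

  auto channel = grpc::CreateChannel("localhost:44432",
                                     grpc::InsecureChannelCredentials());
  auto stub =
      localassetsstreammanager::LocalAssetsStreamManager::NewStub(channel);
  localassetsstreammanager::StartSessionRequest request;
  request.set_gamelet_name(
      "organizations/my-org/projects/my-proj/pools/pool-1/gamelets/edge/zone/id");
  request.set_workstation_directory("C:\\game\\assets");
  localassetsstreammanager::StartSessionResponse response;
  grpc::ClientContext context;
  grpc::Status status = stub->StartSession(&context, request, &response);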
182
asset_stream_manager/main.cc
Normal file
@@ -0,0 +1,182 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl_helper/jedec_size_flag.h"
#include "asset_stream_manager/asset_stream_config.h"
#include "asset_stream_manager/background_service_impl.h"
#include "asset_stream_manager/local_assets_stream_manager_service_impl.h"
#include "asset_stream_manager/session_management_server.h"
#include "asset_stream_manager/session_manager.h"
#include "common/log.h"
#include "common/path.h"
#include "common/process.h"
#include "common/sdk_util.h"
#include "common/status_macros.h"
#include "data_store/data_provider.h"
#include "data_store/disk_data_store.h"
#include "metrics/metrics.h"

namespace cdc_ft {
namespace {

constexpr int kSessionManagementPort = 44432;

absl::Status Run(const AssetStreamConfig& cfg) {
  WinProcessFactory process_factory;
  metrics::MetricsService metrics_service;

  SessionManager session_manager(cfg.session_cfg(), &process_factory,
                                 &metrics_service);
  BackgroundServiceImpl background_service;
  LocalAssetsStreamManagerServiceImpl session_service(
      &session_manager, &process_factory, &metrics_service);

  SessionManagementServer sm_server(&session_service, &background_service,
                                    &session_manager);
  background_service.SetExitCallback(
      [&sm_server]() { return sm_server.Shutdown(); });

  RETURN_IF_ERROR(sm_server.Start(kSessionManagementPort));
  if (!cfg.src_dir().empty()) {
    MultiSession* ms_unused;
    metrics::SessionStartStatus status_unused;
    RETURN_IF_ERROR(session_manager.StartSession(
        /*instance_id=*/cfg.instance_ip(), /*project_id=*/std::string(),
        /*organization_id=*/std::string(), cfg.instance_ip(),
        cfg.instance_port(), cfg.src_dir(), &ms_unused, &status_unused));
  }
  sm_server.RunUntilShutdown();
  return absl::OkStatus();
}

void InitLogging(bool log_to_stdout, int verbosity) {
  LogLevel level = cdc_ft::Log::VerbosityToLogLevel(verbosity);
  if (log_to_stdout) {
    cdc_ft::Log::Initialize(std::make_unique<cdc_ft::ConsoleLog>(level));
  } else {
    SdkUtil util;
    cdc_ft::Log::Initialize(std::make_unique<cdc_ft::FileLog>(
        level, util.GetLogPath("assets_stream_manager_v3").c_str()));
  }
}

// Declare AS20 flags, so that AS30 can be used on older SDKs simply by
// replacing the binary. Note that the RETIRED_FLAGS macro can't be used
// because the flags contain dashes. This code mimics the macro.
absl::flags_internal::RetiredFlag<int> RETIRED_FLAGS_port;
absl::flags_internal::RetiredFlag<std::string> RETIRED_FLAGS_session_ports;
absl::flags_internal::RetiredFlag<std::string> RETIRED_FLAGS_gm_mount_point;
absl::flags_internal::RetiredFlag<bool> RETIRED_FLAGS_allow_edge;
const auto RETIRED_FLAGS_REG_port =
    (RETIRED_FLAGS_port.Retire("port"),
     ::absl::flags_internal::FlagRegistrarEmpty{});
const auto RETIRED_FLAGS_REG_session_ports =
    (RETIRED_FLAGS_session_ports.Retire("session-ports"),
     ::absl::flags_internal::FlagRegistrarEmpty{});
const auto RETIRED_FLAGS_REG_gm_mount_point =
    (RETIRED_FLAGS_gm_mount_point.Retire("gamelet-mount-point"),
     ::absl::flags_internal::FlagRegistrarEmpty{});
const auto RETIRED_FLAGS_REG_allow_edge =
    (RETIRED_FLAGS_allow_edge.Retire("allow-edge"),
     ::absl::flags_internal::FlagRegistrarEmpty{});

}  // namespace
}  // namespace cdc_ft

ABSL_FLAG(std::string, src_dir, "",
          "Start a streaming session immediately from the given Windows path. "
          "Used during development. Must have exactly one gamelet reserved or "
          "specify the target gamelet with --instance.");
ABSL_FLAG(std::string, instance_ip, "",
          "Connect to the instance with the given IP address for this session. "
          "This flag is ignored unless --src_dir is set as well. Used "
          "during development. ");
ABSL_FLAG(uint16_t, instance_port, 0,
          "Connect to the instance through the given SSH port. "
          "This flag is ignored unless --src_dir is set as well. Used "
          "during development. ");
ABSL_FLAG(int, verbosity, 2, "Verbosity of the log output");
ABSL_FLAG(bool, debug, false, "Run FUSE filesystem in debug mode");
ABSL_FLAG(bool, singlethreaded, false,
          "Run FUSE filesystem in singlethreaded mode");
ABSL_FLAG(bool, stats, false,
          "Collect and print detailed streaming statistics");
ABSL_FLAG(bool, quiet, false,
          "Do not print any output except errors and stats");
ABSL_FLAG(int, manifest_updater_threads, 4,
          "Number of threads used to compute file hashes on the workstation.");
ABSL_FLAG(int, file_change_wait_duration_ms, 500,
          "Time in milliseconds to wait until pushing a file change to the "
          "instance after detecting it.");
ABSL_FLAG(bool, check, false, "Check FUSE consistency and log check results");
ABSL_FLAG(bool, log_to_stdout, false, "Log to stdout instead of to a file");
ABSL_FLAG(cdc_ft::JedecSize, cache_capacity,
          cdc_ft::JedecSize(cdc_ft::DiskDataStore::kDefaultCapacity),
          "Cache capacity. Supports common unit suffixes K, M, G.");
ABSL_FLAG(uint32_t, cleanup_timeout, cdc_ft::DataProvider::kCleanupTimeoutSec,
          "Period in seconds at which instance cache cleanups are run");
ABSL_FLAG(uint32_t, access_idle_timeout, cdc_ft::DataProvider::kAccessIdleSec,
          "Do not run instance cache cleanups for this many seconds after the "
          "last file access");

int main(int argc, char* argv[]) {
  absl::ParseCommandLine(argc, argv);

  // Set up config. Allow overriding this config with
  // %APPDATA%\GGP\services\assets_stream_manager_v3.json.
  cdc_ft::SdkUtil sdk_util;
  const std::string config_path = cdc_ft::path::Join(
      sdk_util.GetServicesConfigPath(), "assets_stream_manager_v3.json");
  cdc_ft::AssetStreamConfig cfg;
  absl::Status cfg_load_status = cfg.LoadFromFile(config_path);

  cdc_ft::InitLogging(cfg.log_to_stdout(), cfg.session_cfg().verbosity);

  // Log status of loaded configuration. Errors are not critical.
  if (cfg_load_status.ok()) {
    LOG_INFO("Successfully loaded configuration file at '%s'", config_path);
  } else if (absl::IsNotFound(cfg_load_status)) {
    LOG_INFO("No configuration file found at '%s'", config_path);
  } else {
    LOG_ERROR("%s", cfg_load_status.message());
  }

  std::string flags_read = cfg.GetFlagsReadFromFile();
  if (!flags_read.empty()) {
    LOG_INFO(
        "The following settings were read from the configuration file and "
        "override the corresponding command line flags if set: %s",
        flags_read);
  }

  std::string flag_errors = cfg.GetFlagReadErrors();
  if (!flag_errors.empty()) {
    LOG_WARNING("%s", flag_errors);
  }

  LOG_DEBUG("Configuration:\n%s", cfg.ToString());

  absl::Status status = cdc_ft::Run(cfg);
  if (!status.ok()) {
    LOG_ERROR("%s", status.ToString());
  } else {
    LOG_INFO("Asset stream manager shut down successfully.");
  }

  cdc_ft::Log::Shutdown();
  static_assert(static_cast<int>(absl::StatusCode::kOk) == 0, "kOk not 0");
  return static_cast<int>(status.code());
}
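Editor's note: a hypothetical development invocation combining the flags defined above; the executable name and all values are illustrative.

  asset_stream_manager.exe --src_dir=C:\game\assets --instance_ip=203.0.113.10 ^
      --instance_port=44322 --cache_capacity=100G --verbosity=3 --log_to_stdout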
69
asset_stream_manager/metrics_recorder.cc
Normal file
@@ -0,0 +1,69 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/metrics_recorder.h"

#include "common/log.h"

namespace cdc_ft {

MetricsRecorder::MetricsRecorder(metrics::MetricsService* const metrics_service)
    : metrics_service_(metrics_service) {}

metrics::MetricsService* MetricsRecorder::GetMetricsService() const {
  return metrics_service_;
}

MultiSessionMetricsRecorder::MultiSessionMetricsRecorder(
    metrics::MetricsService* const metrics_service)
    : MetricsRecorder(metrics_service),
      multisession_id_(Util::GenerateUniqueId()) {}

MultiSessionMetricsRecorder::~MultiSessionMetricsRecorder() = default;

void MultiSessionMetricsRecorder::RecordEvent(metrics::DeveloperLogEvent event,
                                              metrics::EventType code) const {
  if (!event.as_manager_data) {
    event.as_manager_data =
        std::make_unique<metrics::AssetStreamingManagerData>();
  }
  event.as_manager_data->multisession_id = multisession_id_;
  metrics_service_->RecordEvent(std::move(event), code);
}

SessionMetricsRecorder::SessionMetricsRecorder(
    metrics::MetricsService* const metrics_service,
    const std::string& multisession_id, const std::string& project_id,
    const std::string& organization_id)
    : MetricsRecorder(metrics_service),
      multisession_id_(multisession_id),
      project_id_(project_id),
      organization_id_(organization_id),
      session_id_(Util::GenerateUniqueId()) {}

SessionMetricsRecorder::~SessionMetricsRecorder() = default;

void SessionMetricsRecorder::RecordEvent(metrics::DeveloperLogEvent event,
                                         metrics::EventType code) const {
  if (!event.as_manager_data) {
    event.as_manager_data =
        std::make_unique<metrics::AssetStreamingManagerData>();
  }
  event.as_manager_data->multisession_id = multisession_id_;
  event.as_manager_data->session_id = session_id_;
  event.project_id = project_id_;
  event.organization_id = organization_id_;
  metrics_service_->RecordEvent(std::move(event), code);
}
}  // namespace cdc_ft
77
asset_stream_manager/metrics_recorder.h
Normal file
@@ -0,0 +1,77 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ASSET_STREAM_MANAGER_METRICS_RECORDER_H_
#define ASSET_STREAM_MANAGER_METRICS_RECORDER_H_

#include "absl/status/status.h"
#include "common/util.h"
#include "metrics/enums.h"
#include "metrics/messages.h"
#include "metrics/metrics.h"

namespace cdc_ft {

class MetricsRecorder {
 public:
  virtual void RecordEvent(metrics::DeveloperLogEvent event,
                           metrics::EventType code) const = 0;

  virtual metrics::MetricsService* GetMetricsService() const;

 protected:
  explicit MetricsRecorder(metrics::MetricsService* const metrics_service);
  metrics::MetricsService* const metrics_service_;
};

class MultiSessionMetricsRecorder : public MetricsRecorder {
 public:
  explicit MultiSessionMetricsRecorder(
      metrics::MetricsService* const metrics_service);
  ~MultiSessionMetricsRecorder();

  virtual void RecordEvent(metrics::DeveloperLogEvent event,
                           metrics::EventType code) const;

  const std::string& MultiSessionId() const { return multisession_id_; }

 private:
  std::string multisession_id_;
};

class SessionMetricsRecorder : public MetricsRecorder {
 public:
  explicit SessionMetricsRecorder(
      metrics::MetricsService* const metrics_service,
      const std::string& multisession_id, const std::string& project_id,
      const std::string& organization_id);
  ~SessionMetricsRecorder();

  virtual void RecordEvent(metrics::DeveloperLogEvent event,
                           metrics::EventType code) const;

  const std::string& SessionId() const { return session_id_; }

 private:
  std::string multisession_id_;
  std::string session_id_;
  std::string project_id_;
  std::string organization_id_;
};

}  // namespace cdc_ft

#endif  // ASSET_STREAM_MANAGER_METRICS_RECORDER_H_
131
asset_stream_manager/metrics_recorder_test.cc
Normal file
@@ -0,0 +1,131 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/metrics_recorder.h"

#include "common/status_test_macros.h"
#include "gtest/gtest.h"
#include "metrics/metrics.h"

namespace cdc_ft {
namespace {

struct MetricsRecord {
  MetricsRecord(metrics::DeveloperLogEvent dev_log_event,
                metrics::EventType code)
      : dev_log_event(std::move(dev_log_event)), code(code) {}
  metrics::DeveloperLogEvent dev_log_event;
  metrics::EventType code;
};

class MetricsServiceForTesting : public metrics::MetricsService {
 public:
  MetricsServiceForTesting() {
    metrics_records_ = new std::vector<MetricsRecord>();
  }

  ~MetricsServiceForTesting() { delete metrics_records_; }

  void RecordEvent(metrics::DeveloperLogEvent event,
                   metrics::EventType code) const override {
    metrics_records_->push_back(MetricsRecord(std::move(event), code));
  }

  int NumberOfRecordRequests() { return (int)metrics_records_->size(); }

  std::vector<MetricsRecord> GetEventsAndClear() {
    return std::move(*metrics_records_);
  }

 private:
  std::vector<MetricsRecord>* metrics_records_;
};

class MetricsRecorderTest : public ::testing::Test {
 public:
  void SetUp() override { metrics_service_ = new MetricsServiceForTesting(); }

  void TearDown() override { delete metrics_service_; }

 protected:
  MetricsServiceForTesting* metrics_service_;
};

TEST_F(MetricsRecorderTest, SendEventWithMultisessionId) {
  MultiSessionMetricsRecorder target(metrics_service_);
  metrics::DeveloperLogEvent q_evt;
  q_evt.project_id = "proj/id";
  q_evt.organization_id = "org/id";

  target.RecordEvent(std::move(q_evt), metrics::EventType::kMultiSessionStart);
  EXPECT_EQ(metrics_service_->NumberOfRecordRequests(), 1);
  std::vector<MetricsRecord> requests = metrics_service_->GetEventsAndClear();
  EXPECT_EQ(requests[0].code, metrics::EventType::kMultiSessionStart);
  metrics::DeveloperLogEvent expected_evt;
  expected_evt.project_id = "proj/id";
  expected_evt.organization_id = "org/id";
  expected_evt.as_manager_data =
      std::make_unique<metrics::AssetStreamingManagerData>();
  expected_evt.as_manager_data->multisession_id = target.MultiSessionId();
  EXPECT_EQ(requests[0].dev_log_event, expected_evt);
  EXPECT_FALSE(target.MultiSessionId().empty());

  q_evt = metrics::DeveloperLogEvent();
  q_evt.project_id = "proj/id";
  q_evt.organization_id = "org/id";
  target.RecordEvent(std::move(q_evt), metrics::EventType::kMultiSessionStart);
  EXPECT_EQ(metrics_service_->NumberOfRecordRequests(), 1);
  std::vector<MetricsRecord> requests2 = metrics_service_->GetEventsAndClear();
  EXPECT_EQ(requests2[0].code, metrics::EventType::kMultiSessionStart);
  EXPECT_EQ(requests2[0].dev_log_event, requests[0].dev_log_event);

  MultiSessionMetricsRecorder target2(metrics_service_);
  EXPECT_NE(target2.MultiSessionId(), target.MultiSessionId());
}

TEST_F(MetricsRecorderTest, SendEventWithSessionId) {
  SessionMetricsRecorder target(metrics_service_, "id1", "m_proj", "m_org");
  metrics::DeveloperLogEvent q_evt;
  q_evt.project_id = "proj/id";
  q_evt.organization_id = "org/id";

  target.RecordEvent(std::move(q_evt), metrics::EventType::kSessionStart);
  EXPECT_EQ(metrics_service_->NumberOfRecordRequests(), 1);
  std::vector<MetricsRecord> requests = metrics_service_->GetEventsAndClear();
  EXPECT_EQ(requests[0].code, metrics::EventType::kSessionStart);
  metrics::DeveloperLogEvent expected_evt;
  expected_evt.project_id = "m_proj";
  expected_evt.organization_id = "m_org";
  expected_evt.as_manager_data =
      std::make_unique<metrics::AssetStreamingManagerData>();
  expected_evt.as_manager_data->multisession_id = "id1";
  expected_evt.as_manager_data->session_id = target.SessionId();
  EXPECT_EQ(requests[0].dev_log_event, expected_evt);
  EXPECT_FALSE(target.SessionId().empty());

  q_evt = metrics::DeveloperLogEvent();
  q_evt.project_id = "proj/id";
  q_evt.organization_id = "org/id";
  target.RecordEvent(std::move(q_evt), metrics::EventType::kSessionStart);
  EXPECT_EQ(metrics_service_->NumberOfRecordRequests(), 1);
  std::vector<MetricsRecord> requests2 = metrics_service_->GetEventsAndClear();
  EXPECT_EQ(requests2[0].code, metrics::EventType::kSessionStart);
  EXPECT_EQ(requests2[0].dev_log_event, requests[0].dev_log_event);

  SessionMetricsRecorder target2(metrics_service_, "id2", "m_proj", "m_org");
  EXPECT_NE(target2.SessionId(), target.SessionId());
}

}  // namespace
}  // namespace cdc_ft
699
asset_stream_manager/multi_session.cc
Normal file
@@ -0,0 +1,699 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/multi_session.h"

#include "asset_stream_manager/session.h"
#include "common/file_watcher_win.h"
#include "common/log.h"
#include "common/path.h"
#include "common/platform.h"
#include "common/port_manager.h"
#include "common/process.h"
#include "common/util.h"
#include "data_store/disk_data_store.h"
#include "manifest/content_id.h"
#include "manifest/manifest_iterator.h"
#include "manifest/manifest_printer.h"
#include "manifest/manifest_proto_defs.h"
#include "metrics/enums.h"
#include "metrics/messages.h"

namespace cdc_ft {
namespace {

// Ports used by the asset streaming service for local port forwarding on
// workstation and gamelet.
constexpr int kAssetStreamPortFirst = 44433;
constexpr int kAssetStreamPortLast = 44442;

// Stats output period (if enabled).
constexpr double kStatsPrintDelaySec = 0.1f;

ManifestUpdater::Operator FileWatcherActionToOperation(
    FileWatcherWin::FileAction action) {
  switch (action) {
    case FileWatcherWin::FileAction::kAdded:
      return ManifestUpdater::Operator::kAdd;
    case FileWatcherWin::FileAction::kModified:
      return ManifestUpdater::Operator::kUpdate;
    case FileWatcherWin::FileAction::kDeleted:
      return ManifestUpdater::Operator::kDelete;
  }
  // The switch must cover all actions.
  LOG_ERROR("Unhandled action: %d", static_cast<int>(action));
  assert(false);
  return ManifestUpdater::Operator::kAdd;
}

// Converts |modified_files| (as returned from the file watcher) into an
// OperationList (as required by the manifest updater).
ManifestUpdater::OperationList GetFileOperations(
    const FileWatcherWin::FileMap& modified_files) {
  AssetInfo ai;
  ManifestUpdater::OperationList ops;
  ops.reserve(modified_files.size());
  for (const auto& [path, info] : modified_files) {
    ai.path = path;
    ai.type = info.is_dir ? AssetProto::DIRECTORY : AssetProto::FILE;
    ai.size = info.size;
    ai.mtime = info.mtime;
    ops.emplace_back(FileWatcherActionToOperation(info.action), std::move(ai));
  }
  return ops;
}

}  // namespace

MultiSessionRunner::MultiSessionRunner(
    std::string src_dir, DataStoreWriter* data_store,
    ProcessFactory* process_factory, bool enable_stats,
    absl::Duration wait_duration, uint32_t num_updater_threads,
    MultiSessionMetricsRecorder const* metrics_recorder,
    ManifestUpdatedCb manifest_updated_cb)
    : src_dir_(std::move(src_dir)),
      data_store_(data_store),
      process_factory_(process_factory),
      file_chunks_(enable_stats),
      wait_duration_(wait_duration),
      num_updater_threads_(num_updater_threads),
      manifest_updated_cb_(std::move(manifest_updated_cb)),
      metrics_recorder_(metrics_recorder) {
  assert(metrics_recorder_);
}

absl::Status MultiSessionRunner::Initialize(int port,
                                            AssetStreamServerType type,
                                            ContentSentHandler content_sent) {
  // Start the server.
  assert(!server_);
  server_ = AssetStreamServer::Create(type, src_dir_, data_store_,
                                      &file_chunks_, content_sent);
  assert(server_);
  RETURN_IF_ERROR(server_->Start(port),
                  "Failed to start asset stream server for '%s'", src_dir_);

  assert(!thread_);
  thread_ = std::make_unique<std::thread>([this]() { Run(); });

  return absl::OkStatus();
}

absl::Status MultiSessionRunner::Shutdown() {
  // Send shutdown signal.
  {
    absl::MutexLock lock(&mutex_);
    shutdown_ = true;
  }
  if (thread_) {
    if (thread_->joinable()) thread_->join();
    thread_.reset();
  }

  // Shut down asset stream server.
  if (server_) {
    server_->Shutdown();
    server_.reset();
  }

  return status_;
}

absl::Status MultiSessionRunner::WaitForManifestAck(
    const std::string& instance_id, absl::Duration fuse_timeout) {
  {
    absl::MutexLock lock(&mutex_);

    LOG_INFO("Waiting for manifest to be available");
    auto cond = [this]() { return manifest_set_ || !status_.ok(); };
    mutex_.Await(absl::Condition(&cond));

    if (!status_.ok())
      return WrapStatus(status_, "Failed to set up streaming session for '%s'",
                        src_dir_);
  }

  LOG_INFO("Waiting for FUSE ack");
  assert(server_);
  RETURN_IF_ERROR(server_->WaitForManifestAck(instance_id, fuse_timeout));

  return absl::OkStatus();
}

absl::Status MultiSessionRunner::Status() {
  absl::MutexLock lock(&mutex_);
  return status_;
}

ContentIdProto MultiSessionRunner::ManifestId() const {
  assert(server_);
  return server_->GetManifestId();
}

void MultiSessionRunner::Run() {
  // Create the manifest updater.
  UpdaterConfig cfg;
  cfg.num_threads = num_updater_threads_;
  cfg.src_dir = src_dir_;
  ManifestUpdater manifest_updater(data_store_, std::move(cfg));
|
||||||
|
|
||||||
|
// Set up file watcher.
|
||||||
|
// The streamed path should be a directory and exist at the beginning.
|
||||||
|
FileWatcherWin watcher(src_dir_);
|
||||||
|
absl::Status status = watcher.StartWatching([this]() { OnFilesChanged(); },
|
||||||
|
[this]() { OnDirRecreated(); });
|
||||||
|
if (!status.ok()) {
|
||||||
|
SetStatus(
|
||||||
|
WrapStatus(status, "Failed to update manifest for '%s'", src_dir_));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push an intermediate manifest containing the full directory structure, but
|
||||||
|
// potentially missing chunks. The purpose is that the FUSE can immediately
|
||||||
|
// show the structure and inode stats. FUSE will block on file reads that
|
||||||
|
// cannot be served due to missing chunks until the manifest is ready.
|
||||||
|
auto push_intermediate_manifest = [this](const ContentIdProto& manifest_id) {
|
||||||
|
SetManifest(manifest_id);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Bring the manifest up to date.
|
||||||
|
LOG_INFO("Updating manifest for '%s'...", src_dir_);
|
||||||
|
Stopwatch sw;
|
||||||
|
status =
|
||||||
|
manifest_updater.UpdateAll(&file_chunks_, push_intermediate_manifest);
|
||||||
|
RecordManifestUpdate(manifest_updater, sw.Elapsed(),
|
||||||
|
metrics::UpdateTrigger::kInitUpdateAll, status);
|
||||||
|
if (!status.ok()) {
|
||||||
|
SetStatus(
|
||||||
|
WrapStatus(status, "Failed to update manifest for '%s'", src_dir_));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
RecordMultiSessionStart(manifest_updater);
|
||||||
|
SetManifest(manifest_updater.ManifestId());
|
||||||
|
LOG_INFO("Manifest for '%s' updated in %0.3f seconds", src_dir_,
|
||||||
|
sw.ElapsedSeconds());
|
||||||
|
|
||||||
|
while (!shutdown_) {
|
||||||
|
FileWatcherWin::FileMap modified_files;
|
||||||
|
bool clean_manifest = false;
|
||||||
|
{
|
||||||
|
// Wait for changes.
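// Changed files are picked up in batches: once the watcher reports a change,
// the loop waits another |wait_duration_| before querying the modified files,
// so that rapid successive changes result in a single manifest update.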
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
|
||||||
|
bool prev_files_changed = files_changed_;
|
||||||
|
absl::Duration timeout =
|
||||||
|
absl::Seconds(file_chunks_.HasStats() ? kStatsPrintDelaySec : 3600.0);
|
||||||
|
if (files_changed_) {
|
||||||
|
timeout = std::max(wait_duration_ - files_changed_timer_.Elapsed(),
|
||||||
|
absl::Milliseconds(1));
|
||||||
|
} else {
|
||||||
|
files_changed_timer_.Reset();
|
||||||
|
}
|
||||||
|
auto cond = [this]() {
|
||||||
|
return shutdown_ || files_changed_ || dir_recreated_;
|
||||||
|
};
|
||||||
|
mutex_.AwaitWithTimeout(absl::Condition(&cond), timeout);
|
||||||
|
|
||||||
|
// If |files_changed_| became true, wait some more time before updating
|
||||||
|
// the manifest.
|
||||||
|
if (!prev_files_changed && files_changed_) files_changed_timer_.Reset();
|
||||||
|
|
||||||
|
// Shut down.
|
||||||
|
if (shutdown_) {
|
||||||
|
LOG_INFO("MultiSession('%s'): Shutting down", src_dir_);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pick up modified files.
|
||||||
|
if (!dir_recreated_ && files_changed_ &&
|
||||||
|
files_changed_timer_.Elapsed() > wait_duration_) {
|
||||||
|
modified_files = watcher.GetModifiedFiles();
|
||||||
|
files_changed_ = false;
|
||||||
|
files_changed_timer_.Reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dir_recreated_) {
|
||||||
|
clean_manifest = true;
|
||||||
|
dir_recreated_ = false;
|
||||||
|
}
|
||||||
|
} // mutex_ lock
|
||||||
|
|
||||||
|
if (clean_manifest) {
|
||||||
|
LOG_DEBUG(
|
||||||
|
"Streamed directory '%s' was possibly re-created or not all changes "
|
||||||
|
"were detected, re-building the manifest",
|
||||||
|
src_dir_);
|
||||||
|
modified_files.clear();
|
||||||
|
sw.Reset();
|
||||||
|
status = manifest_updater.UpdateAll(&file_chunks_);
|
||||||
|
RecordManifestUpdate(manifest_updater, sw.Elapsed(),
|
||||||
|
metrics::UpdateTrigger::kRunningUpdateAll, status);
|
||||||
|
if (!status.ok()) {
|
||||||
|
LOG_WARNING(
|
||||||
|
"Updating manifest for '%s' after re-creating directory failed: "
|
||||||
|
"'%s'",
|
||||||
|
src_dir_, status.ToString());
|
||||||
|
SetManifest(manifest_updater.DefaultManifestId());
|
||||||
|
} else {
|
||||||
|
SetManifest(manifest_updater.ManifestId());
|
||||||
|
}
|
||||||
|
} else if (!modified_files.empty()) {
|
||||||
|
ManifestUpdater::OperationList ops = GetFileOperations(modified_files);
|
||||||
|
sw.Reset();
|
||||||
|
status = manifest_updater.Update(&ops, &file_chunks_);
|
||||||
|
RecordManifestUpdate(manifest_updater, sw.Elapsed(),
|
||||||
|
metrics::UpdateTrigger::kRegularUpdate, status);
|
||||||
|
if (!status.ok()) {
|
||||||
|
LOG_WARNING("Updating manifest for '%s' failed: %s", src_dir_,
|
||||||
|
status.ToString());
|
||||||
|
SetManifest(manifest_updater.DefaultManifestId());
|
||||||
|
} else {
|
||||||
|
SetManifest(manifest_updater.ManifestId());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update stats output.
|
||||||
|
file_chunks_.PrintStats();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::RecordManifestUpdate(
|
||||||
|
const ManifestUpdater& manifest_updater, absl::Duration duration,
|
||||||
|
metrics::UpdateTrigger trigger, absl::Status status) {
|
||||||
|
metrics::DeveloperLogEvent evt;
|
||||||
|
evt.as_manager_data = std::make_unique<metrics::AssetStreamingManagerData>();
|
||||||
|
evt.as_manager_data->manifest_update_data =
|
||||||
|
std::make_unique<metrics::ManifestUpdateData>();
|
||||||
|
evt.as_manager_data->manifest_update_data->local_duration_ms =
|
||||||
|
absl::ToInt64Milliseconds(duration);
|
||||||
|
evt.as_manager_data->manifest_update_data->status = status.code();
|
||||||
|
evt.as_manager_data->manifest_update_data->trigger = trigger;
|
||||||
|
const UpdaterStats& stats = manifest_updater.Stats();
|
||||||
|
evt.as_manager_data->manifest_update_data->total_assets_added_or_updated =
|
||||||
|
stats.total_assets_added_or_updated;
|
||||||
|
evt.as_manager_data->manifest_update_data->total_assets_deleted =
|
||||||
|
stats.total_assets_deleted;
|
||||||
|
evt.as_manager_data->manifest_update_data->total_chunks = stats.total_chunks;
|
||||||
|
evt.as_manager_data->manifest_update_data->total_files_added_or_updated =
|
||||||
|
stats.total_files_added_or_updated;
|
||||||
|
evt.as_manager_data->manifest_update_data->total_files_failed =
|
||||||
|
stats.total_files_failed;
|
||||||
|
evt.as_manager_data->manifest_update_data->total_processed_bytes =
|
||||||
|
stats.total_processed_bytes;
|
||||||
|
metrics_recorder_->RecordEvent(std::move(evt),
|
||||||
|
metrics::EventType::kManifestUpdated);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::RecordMultiSessionStart(
|
||||||
|
const ManifestUpdater& manifest_updater) {
|
||||||
|
metrics::DeveloperLogEvent evt;
|
||||||
|
evt.as_manager_data = std::make_unique<metrics::AssetStreamingManagerData>();
|
||||||
|
evt.as_manager_data->multi_session_start_data =
|
||||||
|
std::make_unique<metrics::MultiSessionStartData>();
|
||||||
|
ManifestIterator manifest_iter(data_store_);
|
||||||
|
absl::Status status = manifest_iter.Open(manifest_updater.ManifestId());
|
||||||
|
if (status.ok()) {
|
||||||
|
const AssetProto* entry = &manifest_iter.Manifest().root_dir();
|
||||||
|
uint32_t file_count = 0;
|
||||||
|
uint64_t total_chunks = 0;
|
||||||
|
uint64_t total_processed_bytes = 0;
|
||||||
|
do {
|
||||||
|
if (entry->type() == AssetProto::FILE) {
|
||||||
|
++file_count;
|
||||||
|
total_chunks += entry->file_chunks_size();
|
||||||
|
total_processed_bytes += entry->file_size();
|
||||||
|
for (const IndirectChunkListProto& icl :
|
||||||
|
entry->file_indirect_chunks()) {
|
||||||
|
ChunkListProto list;
|
||||||
|
status = data_store_->GetProto(icl.chunk_list_id(), &list);
|
||||||
|
if (status.ok()) {
|
||||||
|
total_chunks += list.chunks_size();
|
||||||
|
} else {
|
||||||
|
LOG_WARNING("Could not get proto by id: '%s'. %s",
|
||||||
|
ContentId::ToHexString(icl.chunk_list_id()),
|
||||||
|
status.ToString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} while ((entry = manifest_iter.NextEntry()) != nullptr);
|
||||||
|
evt.as_manager_data->multi_session_start_data->file_count = file_count;
|
||||||
|
evt.as_manager_data->multi_session_start_data->chunk_count = total_chunks;
|
||||||
|
evt.as_manager_data->multi_session_start_data->byte_count =
|
||||||
|
total_processed_bytes;
|
||||||
|
} else {
|
||||||
|
LOG_WARNING("Could not open manifest by id: '%s'. %s",
|
||||||
|
ContentId::ToHexString(manifest_updater.ManifestId()),
|
||||||
|
status.ToString());
|
||||||
|
}
|
||||||
|
evt.as_manager_data->multi_session_start_data->min_chunk_size =
|
||||||
|
static_cast<uint64_t>(manifest_updater.Config().min_chunk_size);
|
||||||
|
evt.as_manager_data->multi_session_start_data->avg_chunk_size =
|
||||||
|
static_cast<uint64_t>(manifest_updater.Config().avg_chunk_size);
|
||||||
|
evt.as_manager_data->multi_session_start_data->max_chunk_size =
|
||||||
|
static_cast<uint64_t>(manifest_updater.Config().max_chunk_size);
|
||||||
|
metrics_recorder_->RecordEvent(std::move(evt),
|
||||||
|
metrics::EventType::kMultiSessionStart);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::SetStatus(absl::Status status)
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
status_ = std::move(status);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::OnFilesChanged() {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
files_changed_ = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::OnDirRecreated() {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
dir_recreated_ = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSessionRunner::SetManifest(const ContentIdProto& manifest_id) {
|
||||||
|
server_->SetManifestId(manifest_id);
|
||||||
|
if (Log::Instance()->GetLogLevel() <= LogLevel::kVerbose) {
|
||||||
|
ManifestPrinter printer;
|
||||||
|
ManifestProto manifest_proto;
|
||||||
|
absl::Status status = data_store_->GetProto(manifest_id, &manifest_proto);
|
||||||
|
std::string manifest_text;
|
||||||
|
printer.PrintToString(manifest_proto, &manifest_text);
|
||||||
|
if (status.ok()) {
|
||||||
|
LOG_DEBUG("Set manifest '%s'\n'%s'", ContentId::ToHexString(manifest_id),
|
||||||
|
manifest_text);
|
||||||
|
} else {
|
||||||
|
LOG_WARNING("Could not retrieve manifest from the data store '%s'",
|
||||||
|
ContentId::ToHexString(manifest_id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notify thread that starts the streaming session that a manifest has been
|
||||||
|
// set.
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
manifest_set_ = true;
|
||||||
|
if (manifest_updated_cb_) {
|
||||||
|
manifest_updated_cb_();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MultiSession::MultiSession(std::string src_dir, SessionConfig cfg,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
MultiSessionMetricsRecorder const* metrics_recorder,
|
||||||
|
std::unique_ptr<DataStoreWriter> data_store)
|
||||||
|
: src_dir_(src_dir),
|
||||||
|
cfg_(cfg),
|
||||||
|
process_factory_(process_factory),
|
||||||
|
data_store_(std::move(data_store)),
|
||||||
|
metrics_recorder_(metrics_recorder) {
|
||||||
|
assert(metrics_recorder_);
|
||||||
|
}
|
||||||
|
|
||||||
|
MultiSession::~MultiSession() {
|
||||||
|
absl::Status status = Shutdown();
|
||||||
|
if (!status.ok()) {
|
||||||
|
LOG_ERROR("Shutdown streaming from '%s' failed: %s", src_dir_,
|
||||||
|
status.ToString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MultiSession::Initialize() {
|
||||||
|
// |data_store_| is not set in production, but it can be overridden for tests.
|
||||||
|
if (!data_store_) {
|
||||||
|
std::string cache_path;
|
||||||
|
ASSIGN_OR_RETURN(cache_path, GetCachePath(src_dir_));
|
||||||
|
ASSIGN_OR_RETURN(data_store_,
|
||||||
|
DiskDataStore::Create(/*depth=*/0, cache_path,
|
||||||
|
/*create_dirs=*/true),
|
||||||
|
"Failed to create data store for '%s'", cache_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find an available local port.
|
||||||
|
std::unordered_set<int> ports;
|
||||||
|
ASSIGN_OR_RETURN(
|
||||||
|
ports,
|
||||||
|
PortManager::FindAvailableLocalPorts(kAssetStreamPortFirst,
|
||||||
|
kAssetStreamPortLast, "127.0.0.1",
|
||||||
|
process_factory_, true),
|
||||||
|
"Failed to find an available local port in the range [%d, %d]",
|
||||||
|
kAssetStreamPortFirst, kAssetStreamPortLast);
|
||||||
|
assert(!ports.empty());
|
||||||
|
local_asset_stream_port_ = *ports.begin();
|
||||||
|
|
||||||
|
assert(!runner_);
|
||||||
|
runner_ = std::make_unique<MultiSessionRunner>(
|
||||||
|
src_dir_, data_store_.get(), process_factory_, cfg_.stats,
|
||||||
|
absl::Milliseconds(cfg_.file_change_wait_duration_ms),
|
||||||
|
cfg_.manifest_updater_threads, metrics_recorder_);
|
||||||
|
RETURN_IF_ERROR(runner_->Initialize(
|
||||||
|
local_asset_stream_port_, AssetStreamServerType::kGrpc,
|
||||||
|
[this](uint64_t bc, uint64_t cc, std::string id) {
|
||||||
|
this->OnContentSent(bc, cc, id);
|
||||||
|
}),
|
||||||
|
"Failed to initialize session runner");
|
||||||
|
StartHeartBeatCheck();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MultiSession::Shutdown() {
|
||||||
|
// Stop all sessions.
|
||||||
|
// TODO: Record error on multi-session end.
|
||||||
|
metrics_recorder_->RecordEvent(metrics::DeveloperLogEvent(),
|
||||||
|
metrics::EventType::kMultiSessionEnd);
|
||||||
|
{
|
||||||
|
absl::WriterMutexLock lock(&shutdownMu_);
|
||||||
|
shutdown_ = true;
|
||||||
|
}
|
||||||
|
while (!sessions_.empty()) {
|
||||||
|
std::string instance_id = sessions_.begin()->first;
|
||||||
|
RETURN_IF_ERROR(StopSession(instance_id),
|
||||||
|
"Failed to stop session for instance id %s", instance_id);
|
||||||
|
sessions_.erase(instance_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (runner_) {
|
||||||
|
RETURN_IF_ERROR(runner_->Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (heartbeat_watcher_.joinable()) {
|
||||||
|
heartbeat_watcher_.join();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MultiSession::Status() {
|
||||||
|
return runner_ ? runner_->Status() : absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MultiSession::StartSession(const std::string& instance_id,
|
||||||
|
const std::string& project_id,
|
||||||
|
const std::string& organization_id,
|
||||||
|
const std::string& instance_ip,
|
||||||
|
uint16_t instance_port) {
|
||||||
|
absl::MutexLock lock(&sessions_mutex_);
|
||||||
|
|
||||||
|
if (sessions_.find(instance_id) != sessions_.end()) {
|
||||||
|
return absl::InvalidArgumentError(absl::StrFormat(
|
||||||
|
"Session for instance id '%s' already exists", instance_id));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!runner_)
|
||||||
|
return absl::FailedPreconditionError("MultiSession not started");
|
||||||
|
|
||||||
|
absl::Status runner_status = runner_->Status();
|
||||||
|
if (!runner_status.ok()) {
|
||||||
|
return WrapStatus(runner_status,
|
||||||
|
"Failed to set up streaming session for '%s'", src_dir_);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto metrics_recorder = std::make_unique<SessionMetricsRecorder>(
|
||||||
|
metrics_recorder_->GetMetricsService(),
|
||||||
|
metrics_recorder_->MultiSessionId(), project_id, organization_id);
|
||||||
|
|
||||||
|
auto session =
|
||||||
|
std::make_unique<Session>(instance_id, instance_ip, instance_port, cfg_,
|
||||||
|
process_factory_, std::move(metrics_recorder));
|
||||||
|
RETURN_IF_ERROR(session->Start(local_asset_stream_port_,
|
||||||
|
kAssetStreamPortFirst, kAssetStreamPortLast));
|
||||||
|
|
||||||
|
// Wait for the FUSE to receive the intermediate manifest.
|
||||||
|
RETURN_IF_ERROR(runner_->WaitForManifestAck(instance_id, absl::Seconds(5)));
|
||||||
|
|
||||||
|
sessions_[instance_id] = std::move(session);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MultiSession::StopSession(const std::string& instance_id) {
|
||||||
|
absl::MutexLock lock(&sessions_mutex_);
|
||||||
|
|
||||||
|
if (sessions_.find(instance_id) == sessions_.end()) {
|
||||||
|
return absl::NotFoundError(
|
||||||
|
absl::StrFormat("No session for instance id '%s' found", instance_id));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!runner_)
|
||||||
|
return absl::FailedPreconditionError("MultiSession not started");
|
||||||
|
|
||||||
|
RETURN_IF_ERROR(sessions_[instance_id]->Stop());
|
||||||
|
sessions_.erase(instance_id);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MultiSession::HasSessionForInstance(const std::string& instance_id) {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
return sessions_.find(instance_id) != sessions_.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MultiSession::IsSessionHealthy(const std::string& instance_id) {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
auto iter = sessions_.find(instance_id);
|
||||||
|
if (iter == sessions_.end()) return false;
|
||||||
|
Session* session = iter->second.get();
|
||||||
|
assert(session);
|
||||||
|
return session->IsHealthy();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool MultiSession::Empty() {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
return sessions_.empty();
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t MultiSession::GetSessionCount() {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
return static_cast<uint32_t>(sessions_.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
// static
|
||||||
|
std::string MultiSession::GetCacheDir(std::string dir) {
|
||||||
|
// Get full path, or else "..\foo" and "C:\foo" are treated differently, even
|
||||||
|
// if they map to the same directory.
|
||||||
|
dir = path::GetFullPath(dir);
|
||||||
|
#if PLATFORM_WINDOWS
|
||||||
|
// On Windows, casing is ignored.
|
||||||
|
std::for_each(dir.begin(), dir.end(), [](char& c) { c = tolower(c); });
|
||||||
|
#endif
|
||||||
|
path::EnsureEndsWithPathSeparator(&dir);
|
||||||
|
dir = path::ToUnix(std::move(dir));
|
||||||
|
ContentIdProto id = ContentId::FromDataString(dir);
|
||||||
|
|
||||||
|
// Replace invalid characters by _.
|
||||||
|
std::for_each(dir.begin(), dir.end(), [](char& c) {
|
||||||
|
static constexpr char invalid_chars[] = "<>:\"/\\|?*";
|
||||||
|
if (strchr(invalid_chars, c)) c = '_';
|
||||||
|
});
|
||||||
|
|
||||||
|
return dir + ContentId::ToHexString(id).substr(0, kDirHashLen);
|
||||||
|
}
|
||||||
|
|
||||||
|
// static
|
||||||
|
absl::StatusOr<std::string> MultiSession::GetCachePath(
|
||||||
|
const std::string& src_dir, size_t max_len) {
|
||||||
|
std::string appdata_path;
|
||||||
|
#if PLATFORM_WINDOWS
|
||||||
|
RETURN_IF_ERROR(
|
||||||
|
path::GetKnownFolderPath(path::FolderId::kRoamingAppData, &appdata_path),
|
||||||
|
"Failed to get roaming appdata path");
|
||||||
|
#elif PLATFORM_LINUX
|
||||||
|
RETURN_IF_ERROR(path::GetEnv("HOME", &appdata_path));
|
||||||
|
path::Append(&appdata_path, ".cache");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
std::string base_dir = path::Join(appdata_path, "GGP", "asset_streaming");
|
||||||
|
std::string cache_dir = GetCacheDir(src_dir);
|
||||||
|
|
||||||
|
size_t total_size = base_dir.size() + 1 + cache_dir.size();
|
||||||
|
if (total_size <= max_len) return path::Join(base_dir, cache_dir);
|
||||||
|
|
||||||
|
// Path needs to be shortened. Remove |to_remove| many chars from the
|
||||||
|
// beginning of |cache_dir|, but keep the hash (last kDirHashLen bytes).
|
||||||
|
size_t to_remove = total_size - max_len;
|
||||||
|
assert(cache_dir.size() >= kDirHashLen);
|
||||||
|
if (to_remove > cache_dir.size() - kDirHashLen)
|
||||||
|
to_remove = cache_dir.size() - kDirHashLen;
|
||||||
|
|
||||||
|
// Remove UTF8 code points from the beginning.
|
||||||
|
size_t start = 0;
|
||||||
|
while (start < to_remove) {
|
||||||
|
int codepoint_len = Util::Utf8CodePointLen(cache_dir.data() + start);
|
||||||
|
// For invalid code points (codepoint_len == 0), just eat byte by byte.
|
||||||
|
start += std::max(codepoint_len, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(start + kDirHashLen <= cache_dir.size());
|
||||||
|
return path::Join(base_dir, cache_dir.substr(start));
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSession::RecordMultiSessionEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code) {
|
||||||
|
metrics_recorder_->RecordEvent(std::move(event), code);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSession::RecordSessionEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code,
|
||||||
|
const std::string& instance_id) {
|
||||||
|
Session* session = FindSession(instance_id);
|
||||||
|
if (session) {
|
||||||
|
session->RecordEvent(std::move(event), code);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Session* MultiSession::FindSession(const std::string& instance_id) {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
auto session_it = sessions_.find(instance_id);
|
||||||
|
if (session_it == sessions_.end()) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
return session_it->second.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSession::OnContentSent(size_t byte_count, size_t chunk_count,
|
||||||
|
std::string instance_id) {
|
||||||
|
if (instance_id.empty()) {
|
||||||
|
// |instance_id| is empty only in the case where the manifest hasn't been
// acknowledged by the gamelet yet
// (ConfigStreamServiceImpl::AckManifestIdReceived was not invoked). This
// means MultiSession::StartSession is still waiting for the manifest
// acknowledgment and |sessions_mutex_| is currently locked. In this
|
||||||
|
// case invoking MultiSession::FindSession and waiting for |sessions_mutex_|
|
||||||
|
// to get unlocked will block the current thread, which is also responsible
|
||||||
|
// for receiving a call at ConfigStreamServiceImpl::AckManifestIdReceived.
|
||||||
|
// This causes a deadlock and leads to a DeadlineExceeded error.
|
||||||
|
LOG_WARNING("Can not record session content for an empty instance_id.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Session* session = FindSession(instance_id);
|
||||||
|
if (session == nullptr) {
|
||||||
|
LOG_WARNING("Failed to find active session by instrance id: %s",
|
||||||
|
instance_id);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
session->OnContentSent(byte_count, chunk_count);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MultiSession::StartHeartBeatCheck() {
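// Background thread that records a heartbeat (if it changed) for each active
// session every 5 minutes until Shutdown() sets |shutdown_|.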
|
||||||
|
heartbeat_watcher_ = std::thread([this]() ABSL_LOCKS_EXCLUDED(shutdownMu_) {
|
||||||
|
auto cond = [this]() { return shutdown_; };
|
||||||
|
while (!shutdownMu_.LockWhenWithTimeout(absl::Condition(&cond),
|
||||||
|
absl::Minutes(5))) {
|
||||||
|
absl::ReaderMutexLock lock(&sessions_mutex_);
|
||||||
|
for (auto it = sessions_.begin(); it != sessions_.end(); ++it) {
|
||||||
|
it->second->RecordHeartBeatIfChanged();
|
||||||
|
}
|
||||||
|
shutdownMu_.Unlock();
|
||||||
|
}
|
||||||
|
shutdownMu_.Unlock();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
266
asset_stream_manager/multi_session.h
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_MULTI_SESSION_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_MULTI_SESSION_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <thread>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/status/statusor.h"
|
||||||
|
#include "asset_stream_manager/asset_stream_server.h"
|
||||||
|
#include "asset_stream_manager/metrics_recorder.h"
|
||||||
|
#include "asset_stream_manager/session_config.h"
|
||||||
|
#include "common/stopwatch.h"
|
||||||
|
#include "data_store/data_store_writer.h"
|
||||||
|
#include "manifest/file_chunk_map.h"
|
||||||
|
#include "manifest/manifest_updater.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class ProcessFactory;
|
||||||
|
class Session;
|
||||||
|
using ManifestUpdatedCb = std::function<void()>;
|
||||||
|
|
||||||
|
// Updates the manifest and runs a file watcher in a background thread.
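//
// Example usage (a minimal sketch based on MultiSession::Initialize() in
// multi_session.cc and on multi_session_test.cc; the directory, port and
// timeout values below are illustrative placeholders):
//
//   MultiSessionRunner runner(R"(C:\src\game)", &data_store, &process_factory,
//                             /*enable_stats=*/false, absl::Milliseconds(500),
//                             /*num_updater_threads=*/4, &metrics_recorder);
//   RETURN_IF_ERROR(runner.Initialize(44433, AssetStreamServerType::kGrpc));
//   RETURN_IF_ERROR(runner.WaitForManifestAck(instance_id, absl::Seconds(5)));
//   ...
//   RETURN_IF_ERROR(runner.Shutdown());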
|
||||||
|
class MultiSessionRunner {
|
||||||
|
public:
|
||||||
|
// |src_dir| is the source directory on the workstation to stream.
|
||||||
|
// |data_store| can be passed for tests to override the default store used.
|
||||||
|
// |process_factory| abstracts process creation.
|
||||||
|
// |enable_stats| indicates whether statistics should be collected.
|
||||||
|
// |wait_duration| is the waiting time for changes in the streamed directory.
|
||||||
|
// |num_updater_threads| is the thread count for the manifest updater.
|
||||||
|
// |manifest_updated_cb| is the callback executed when a new manifest is set.
|
||||||
|
MultiSessionRunner(
|
||||||
|
std::string src_dir, DataStoreWriter* data_store,
|
||||||
|
ProcessFactory* process_factory, bool enable_stats,
|
||||||
|
absl::Duration wait_duration, uint32_t num_updater_threads,
|
||||||
|
MultiSessionMetricsRecorder const* metrics_recorder,
|
||||||
|
ManifestUpdatedCb manifest_updated_cb = ManifestUpdatedCb());
|
||||||
|
|
||||||
|
~MultiSessionRunner() = default;
|
||||||
|
|
||||||
|
// Starts |server_| of |type| on |port|.
|
||||||
|
absl::Status Initialize(
|
||||||
|
int port, AssetStreamServerType type,
|
||||||
|
ContentSentHandler content_sent = ContentSentHandler());
|
||||||
|
|
||||||
|
// Stops updating the manifest and |server_|.
|
||||||
|
absl::Status Shutdown() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Waits until a manifest is ready and the gamelet |instance_id| has
|
||||||
|
// acknowledged the reception of the currently set manifest id. |fuse_timeout|
|
||||||
|
// is the timeout for waiting for the FUSE manifest ack. The time required to
|
||||||
|
// generate the manifest is not part of this timeout, as this can take a long
// time for a directory with many files.
|
||||||
|
absl::Status WaitForManifestAck(const std::string& instance_id,
|
||||||
|
absl::Duration fuse_timeout);
|
||||||
|
|
||||||
|
absl::Status Status() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Returns the current manifest id used by |server_|.
|
||||||
|
ContentIdProto ManifestId() const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Updates manifest if the content of the watched directory changes and
|
||||||
|
// distributes it to subscribed gamelets.
|
||||||
|
void Run();
|
||||||
|
|
||||||
|
// Record MultiSessionStart event.
|
||||||
|
void RecordMultiSessionStart(const ManifestUpdater& manifest_updater);
|
||||||
|
|
||||||
|
// Record ManifestUpdate event.
|
||||||
|
void RecordManifestUpdate(const ManifestUpdater& manifest_updater,
|
||||||
|
absl::Duration duration,
|
||||||
|
metrics::UpdateTrigger trigger,
|
||||||
|
absl::Status status);
|
||||||
|
|
||||||
|
void SetStatus(absl::Status status) ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Files changed callback called from FileWatcherWin.
|
||||||
|
void OnFilesChanged() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Directory recreated callback called from FileWatcherWin.
|
||||||
|
void OnDirRecreated() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Called during manifest update when the intermediate manifest or the final
|
||||||
|
// manifest is available. Pushes the manifest to connected FUSEs.
|
||||||
|
void SetManifest(const ContentIdProto& manifest_id);
|
||||||
|
|
||||||
|
const std::string src_dir_;
|
||||||
|
DataStoreWriter* const data_store_;
|
||||||
|
ProcessFactory* const process_factory_;
|
||||||
|
FileChunkMap file_chunks_;
|
||||||
|
const absl::Duration wait_duration_;
|
||||||
|
const uint32_t num_updater_threads_;
|
||||||
|
const ManifestUpdatedCb manifest_updated_cb_;
|
||||||
|
std::unique_ptr<AssetStreamServer> server_;
|
||||||
|
|
||||||
|
// Modifications (shutdown, file changes).
|
||||||
|
absl::Mutex mutex_;
|
||||||
|
bool shutdown_ ABSL_GUARDED_BY(mutex_) = false;
|
||||||
|
bool files_changed_ ABSL_GUARDED_BY(mutex_) = false;
|
||||||
|
bool dir_recreated_ ABSL_GUARDED_BY(mutex_) = false;
|
||||||
|
bool manifest_set_ ABSL_GUARDED_BY(mutex_) = false;
|
||||||
|
Stopwatch files_changed_timer_ ABSL_GUARDED_BY(mutex_);
|
||||||
|
absl::Status status_ ABSL_GUARDED_BY(mutex_);
|
||||||
|
|
||||||
|
// Background thread that watches files and updates the manifest.
|
||||||
|
std::unique_ptr<std::thread> thread_;
|
||||||
|
|
||||||
|
MultiSessionMetricsRecorder const* metrics_recorder_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Manages an asset streaming session from a fixed directory on the workstation
|
||||||
|
// to an arbitrary number of gamelets.
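//
// Typical call sequence (a sketch derived from the methods declared below;
// the instance id, project, organization, IP and port are placeholders):
//
//   MultiSession ms(R"(C:\src\game)", cfg, &process_factory, &metrics_recorder);
//   RETURN_IF_ERROR(ms.Initialize());
//   RETURN_IF_ERROR(ms.StartSession("instance_1", "my_project", "my_org",
//                                   "203.0.113.10", /*instance_port=*/22));
//   ...
//   RETURN_IF_ERROR(ms.StopSession("instance_1"));
//   RETURN_IF_ERROR(ms.Shutdown());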
|
||||||
|
class MultiSession {
|
||||||
|
public:
|
||||||
|
// Maximum length of cache path. We must be able to write content hashes into
|
||||||
|
// this path:
|
||||||
|
// <cache path>\01234567890123456789<null terminator> = 260 characters.
|
||||||
|
static constexpr size_t kDefaultMaxCachePathLen =
|
||||||
|
260 - 1 - ContentId::kHashSize * 2 - 1;
|
||||||
|
|
||||||
|
// Length of the hash appended to the cache directory, exposed for testing.
|
||||||
|
static constexpr size_t kDirHashLen = 8;
|
||||||
|
|
||||||
|
// |src_dir| is the source directory on the workstation to stream.
|
||||||
|
// |cfg| contains generic configuration parameters for each session.
|
||||||
|
// |process_factory| abstracts process creation.
|
||||||
|
// |data_store| can be passed for tests to override the default store used.
|
||||||
|
// By default, the class uses a DiskDataStore that writes to
|
||||||
|
// %APPDATA%\GGP\asset_streaming\<dir_derived_from_src_dir> on Windows.
|
||||||
|
MultiSession(std::string src_dir, SessionConfig cfg,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
MultiSessionMetricsRecorder const* metrics_recorder,
|
||||||
|
std::unique_ptr<DataStoreWriter> data_store = nullptr);
|
||||||
|
~MultiSession();
|
||||||
|
|
||||||
|
// Initializes the data store if not overridden in the constructor and starts
|
||||||
|
// a background thread for updating the manifest and watching file changes.
|
||||||
|
// Does not wait for the initial manifest update to finish. Use IsRunning()
|
||||||
|
// to determine whether it is finished.
|
||||||
|
// Not thread-safe.
|
||||||
|
absl::Status Initialize();
|
||||||
|
|
||||||
|
// Stops all sessions and shuts down the server.
|
||||||
|
// Not thread-safe.
|
||||||
|
absl::Status Shutdown() ABSL_LOCKS_EXCLUDED(shutdownMu_);
|
||||||
|
|
||||||
|
// Returns the |src_dir| streaming directory passed to the constructor.
|
||||||
|
const std::string& src_dir() const { return src_dir_; }
|
||||||
|
|
||||||
|
// Returns the status of the background thread.
|
||||||
|
// Not thread-safe.
|
||||||
|
absl::Status Status();
|
||||||
|
|
||||||
|
// Starts a new streaming session to the instance with given |instance_id| and
|
||||||
|
// waits until the FUSE has received the initial manifest id.
|
||||||
|
// Returns an error if a session for that instance already exists.
|
||||||
|
// |instance_id| is the instance id of the target remote instance.
|
||||||
|
// |project_id| is the id of the project that contains the instance.
// |organization_id| is the id of the organization that contains the instance.
|
||||||
|
// |instance_ip| is the IP address of the instance.
|
||||||
|
// |instance_port| is the SSH port for connecting to the remote instance.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::Status StartSession(const std::string& instance_id,
|
||||||
|
const std::string& project_id,
|
||||||
|
const std::string& organization_id,
|
||||||
|
const std::string& instance_ip,
|
||||||
|
uint16_t instance_port)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Stops the streaming session to the gamelet with the given |instance_id|.
// Returns a NotFound error if a session for that instance does not exist.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::Status StopSession(const std::string& instance_id)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Returns true if there is an existing session for |instance_id|.
|
||||||
|
bool HasSessionForInstance(const std::string& instance_id)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Returns true if the FUSE process is up and running for an existing session
|
||||||
|
// with ID |instance_id|.
|
||||||
|
bool IsSessionHealthy(const std::string& instance_id)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Returns true if the MultiSession does not have any active sessions.
|
||||||
|
bool Empty() ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Returns the number of active sessions.
|
||||||
|
uint32_t GetSessionCount() ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// For a given source directory |dir|, e.g. "C:\path\to\game", returns a
|
||||||
|
// sanitized version of |dir| plus a hash of |dir|, e.g.
|
||||||
|
// "c__path_to_game_abcdef01".
|
||||||
|
static std::string GetCacheDir(std::string dir);
|
||||||
|
|
||||||
|
// Returns the directory where manifest chunks are cached, e.g.
|
||||||
|
// "%APPDATA%\GGP\asset_streaming\c__path_to_game_abcdef01" for
|
||||||
|
// "C:\path\to\game".
|
||||||
|
// The returned path is shortened to |max_len| by removing UTF8 code points
|
||||||
|
// from the beginning of the actual cache directory (c__path...) if necessary.
|
||||||
|
static absl::StatusOr<std::string> GetCachePath(
|
||||||
|
const std::string& src_dir, size_t max_len = kDefaultMaxCachePathLen);
|
||||||
|
|
||||||
|
// Record an event associated with the multi-session.
|
||||||
|
void RecordMultiSessionEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code);
|
||||||
|
|
||||||
|
// Record an event for a session associated with the |instance|.
|
||||||
|
void RecordSessionEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code,
|
||||||
|
const std::string& instance_id);
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::string src_dir_;
|
||||||
|
SessionConfig cfg_;
|
||||||
|
ProcessFactory* const process_factory_;
|
||||||
|
std::unique_ptr<DataStoreWriter> data_store_;
|
||||||
|
std::thread heartbeat_watcher_;
|
||||||
|
absl::Mutex shutdownMu_;
|
||||||
|
bool shutdown_ ABSL_GUARDED_BY(shutdownMu_) = false;
|
||||||
|
|
||||||
|
// Background thread for watching file changes and updating the manifest.
|
||||||
|
std::unique_ptr<MultiSessionRunner> runner_;
|
||||||
|
|
||||||
|
// Local forwarding port for the asset stream service.
|
||||||
|
int local_asset_stream_port_ = 0;
|
||||||
|
|
||||||
|
// Maps instance id to sessions.
|
||||||
|
std::unordered_map<std::string, std::unique_ptr<Session>> sessions_
|
||||||
|
ABSL_GUARDED_BY(sessions_mutex_);
|
||||||
|
absl::Mutex sessions_mutex_;
|
||||||
|
|
||||||
|
MultiSessionMetricsRecorder const* metrics_recorder_;
|
||||||
|
|
||||||
|
Session* FindSession(const std::string& instance_id)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
void OnContentSent(size_t byte_count, size_t chunk_count,
|
||||||
|
std::string instance_id);
|
||||||
|
|
||||||
|
void StartHeartBeatCheck();
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_MULTI_SESSION_H_
|
||||||
488
asset_stream_manager/multi_session_test.cc
Normal file
@@ -0,0 +1,488 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/multi_session.h"
|
||||||
|
|
||||||
|
#include <chrono>
|
||||||
|
#include <string>
|
||||||
|
#include <thread>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/strings/match.h"
|
||||||
|
#include "asset_stream_manager/testing_asset_stream_server.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/platform.h"
|
||||||
|
#include "common/process.h"
|
||||||
|
#include "common/status_test_macros.h"
|
||||||
|
#include "common/test_main.h"
|
||||||
|
#include "gtest/gtest.h"
|
||||||
|
#include "manifest/manifest_test_base.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
constexpr char kTestDir[] = "multisession_test_dir";
|
||||||
|
constexpr char kData[] = {10, 20, 30, 40, 50, 60, 70, 80, 90};
|
||||||
|
constexpr size_t kDataSize = sizeof(kData);
|
||||||
|
constexpr char kInstance[] = "test_instance";
|
||||||
|
constexpr int kPort = 44444;
|
||||||
|
constexpr absl::Duration kTimeout = absl::Milliseconds(5);
|
||||||
|
constexpr char kVeryLongPath[] =
|
||||||
|
"C:\\this\\is\\some\\really\\really\\really\\really\\really\\really\\really"
|
||||||
|
"\\really\\really\\really\\really\\really\\really\\really\\really\\really"
|
||||||
|
"\\really\\really\\really\\really\\really\\really\\really\\really\\really"
|
||||||
|
"\\really\\really\\really\\really\\really\\really\\really\\really\\really"
|
||||||
|
"\\really\\long\\path";
|
||||||
|
constexpr uint32_t kNumThreads = 1;
|
||||||
|
|
||||||
|
struct MetricsRecord {
|
||||||
|
MetricsRecord(metrics::DeveloperLogEvent evt, metrics::EventType code)
|
||||||
|
: evt(std::move(evt)), code(code) {}
|
||||||
|
metrics::DeveloperLogEvent evt;
|
||||||
|
metrics::EventType code;
|
||||||
|
};
|
||||||
|
|
||||||
|
class MetricsServiceForTest : public MultiSessionMetricsRecorder {
|
||||||
|
public:
|
||||||
|
MetricsServiceForTest() : MultiSessionMetricsRecorder(nullptr) {}
|
||||||
|
|
||||||
|
virtual ~MetricsServiceForTest() = default;
|
||||||
|
|
||||||
|
void RecordEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code) const override
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
metrics_records_.push_back(MetricsRecord(std::move(event), code));
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<MetricsRecord> GetEventsAndClear(metrics::EventType type)
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
std::vector<MetricsRecord> events;
|
||||||
|
std::vector<MetricsRecord> remaining;
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
for (size_t i = 0; i < metrics_records_.size(); ++i) {
|
||||||
|
if (metrics_records_[i].code == type) {
|
||||||
|
events.push_back(std::move(metrics_records_[i]));
|
||||||
|
} else {
|
||||||
|
remaining.push_back(std::move(metrics_records_[i]));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
metrics_records_ = std::move(remaining);
|
||||||
|
return events;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
mutable absl::Mutex mutex_;
|
||||||
|
mutable std::vector<MetricsRecord> metrics_records_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class MultiSessionTest : public ManifestTestBase {
|
||||||
|
public:
|
||||||
|
MultiSessionTest() : ManifestTestBase(GetTestDataDir("multi_session")) {
|
||||||
|
Log::Initialize(std::make_unique<ConsoleLog>(LogLevel::kInfo));
|
||||||
|
}
|
||||||
|
~MultiSessionTest() { Log::Shutdown(); }
|
||||||
|
|
||||||
|
void SetUp() override {
|
||||||
|
// Use a temporary directory to be able to test empty directories (git does
|
||||||
|
// not index empty directories) and creation/deletion of files.
|
||||||
|
EXPECT_OK(path::RemoveDirRec(test_dir_path_));
|
||||||
|
EXPECT_OK(path::CreateDirRec(test_dir_path_));
|
||||||
|
metrics_service_ = new MetricsServiceForTest();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TearDown() override {
|
||||||
|
EXPECT_OK(path::RemoveDirRec(test_dir_path_));
|
||||||
|
delete metrics_service_;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// Callback invoked when the manifest is updated, i.e. a new manifest is set.
|
||||||
|
void OnManifestUpdated() ABSL_LOCKS_EXCLUDED(mutex_) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
++num_manifest_updates_;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Waits until the manifest is fully computed: the manifest id is not changed
|
||||||
|
// anymore.
|
||||||
|
bool WaitForManifestUpdated(uint32_t exp_num_manifest_updates,
|
||||||
|
absl::Duration timeout = absl::Seconds(5)) {
|
||||||
|
absl::MutexLock lock(&mutex_);
|
||||||
|
auto cond = [&]() {
|
||||||
|
return exp_num_manifest_updates == num_manifest_updates_;
|
||||||
|
};
|
||||||
|
mutex_.AwaitWithTimeout(absl::Condition(&cond), timeout);
|
||||||
|
return exp_num_manifest_updates == num_manifest_updates_;
|
||||||
|
}
|
||||||
|
|
||||||
|
void CheckMultiSessionStartNotRecorded() {
|
||||||
|
std::vector<MetricsRecord> events = metrics_service_->GetEventsAndClear(
|
||||||
|
metrics::EventType::kMultiSessionStart);
|
||||||
|
EXPECT_EQ(events.size(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CheckMultiSessionStartRecorded(uint64_t byte_count, uint64_t chunk_count,
|
||||||
|
uint32_t file_count) {
|
||||||
|
std::vector<MetricsRecord> events = metrics_service_->GetEventsAndClear(
|
||||||
|
metrics::EventType::kMultiSessionStart);
|
||||||
|
ASSERT_EQ(events.size(), 1);
|
||||||
|
metrics::MultiSessionStartData* data =
|
||||||
|
events[0].evt.as_manager_data->multi_session_start_data.get();
|
||||||
|
EXPECT_EQ(data->byte_count, byte_count);
|
||||||
|
EXPECT_EQ(data->chunk_count, chunk_count);
|
||||||
|
EXPECT_EQ(data->file_count, file_count);
|
||||||
|
EXPECT_EQ(data->min_chunk_size, 128 << 10);
|
||||||
|
EXPECT_EQ(data->avg_chunk_size, 256 << 10);
|
||||||
|
EXPECT_EQ(data->max_chunk_size, 1024 << 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics::ManifestUpdateData GetManifestUpdateData(
|
||||||
|
metrics::UpdateTrigger trigger, absl::StatusCode status,
|
||||||
|
size_t total_assets_added_or_updated, size_t total_assets_deleted,
|
||||||
|
size_t total_chunks, size_t total_files_added_or_updated,
|
||||||
|
size_t total_files_failed, size_t total_processed_bytes) {
|
||||||
|
metrics::ManifestUpdateData manifest_upd;
|
||||||
|
manifest_upd.trigger = trigger;
|
||||||
|
manifest_upd.status = status;
|
||||||
|
manifest_upd.total_assets_added_or_updated = total_assets_added_or_updated;
|
||||||
|
manifest_upd.total_assets_deleted = total_assets_deleted;
|
||||||
|
manifest_upd.total_chunks = total_chunks;
|
||||||
|
manifest_upd.total_files_added_or_updated = total_files_added_or_updated;
|
||||||
|
manifest_upd.total_files_failed = total_files_failed;
|
||||||
|
manifest_upd.total_processed_bytes = total_processed_bytes;
|
||||||
|
return manifest_upd;
|
||||||
|
}
|
||||||
|
|
||||||
|
void CheckManifestUpdateRecorded(
|
||||||
|
std::vector<metrics::ManifestUpdateData> manifests) {
|
||||||
|
std::vector<MetricsRecord> events = metrics_service_->GetEventsAndClear(
|
||||||
|
metrics::EventType::kManifestUpdated);
|
||||||
|
ASSERT_EQ(events.size(), manifests.size());
|
||||||
|
for (size_t i = 0; i < manifests.size(); ++i) {
|
||||||
|
metrics::ManifestUpdateData* data =
|
||||||
|
events[i].evt.as_manager_data->manifest_update_data.get();
|
||||||
|
EXPECT_LT(data->local_duration_ms, 60000ull);
|
||||||
|
manifests[i].local_duration_ms = data->local_duration_ms;
|
||||||
|
EXPECT_EQ(*data, manifests[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const std::string test_dir_path_ = path::Join(path::GetTempDir(), kTestDir);
|
||||||
|
WinProcessFactory process_factory_;
|
||||||
|
absl::Mutex mutex_;
|
||||||
|
uint32_t num_manifest_updates_ ABSL_GUARDED_BY(mutex_) = 0;
|
||||||
|
MetricsServiceForTest* metrics_service_;
|
||||||
|
};
|
||||||
|
|
||||||
|
constexpr char kCacheDir[] = "c__path_to_dir_ee54bbbc";
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCacheDir_IgnoresTrailingPathSeparators) {
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:\\path\\to\\dir"), kCacheDir);
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:\\path\\to\\dir\\"), kCacheDir);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCacheDir_WorksWithForwardSlashes) {
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:/path/to/dir"), kCacheDir);
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:/path/to/dir/"), kCacheDir);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCacheDir_ReplacesInvalidCharacters) {
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:\\<>:\"/\\|?*"),
|
||||||
|
"c___________ae188efd");
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCacheDir_UsesFullPath) {
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("foo/bar"),
|
||||||
|
MultiSession::GetCacheDir(path::GetFullPath("foo/bar")));
|
||||||
|
}
|
||||||
|
|
||||||
|
#if PLATFORM_WINDOWS
|
||||||
|
TEST_F(MultiSessionTest, GetCacheDir_IgnoresCaseOnWindows) {
|
||||||
|
EXPECT_EQ(MultiSession::GetCacheDir("C:\\PATH\\TO\\DIR"), kCacheDir);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCachePath_ContainsExpectedParts) {
|
||||||
|
absl::StatusOr<std::string> cache_path =
|
||||||
|
MultiSession::GetCachePath("C:\\path\\to\\dir");
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_TRUE(absl::EndsWith(*cache_path, kCacheDir)) << *cache_path;
|
||||||
|
EXPECT_TRUE(
|
||||||
|
absl::StrContains(*cache_path, path::Join("GGP", "asset_streaming")))
|
||||||
|
<< *cache_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCachePath_ShortensLongPaths) {
|
||||||
|
EXPECT_GT(strlen(kVeryLongPath), MultiSession::kDefaultMaxCachePathLen);
|
||||||
|
std::string cache_dir = MultiSession::GetCacheDir(kVeryLongPath);
|
||||||
|
absl::StatusOr<std::string> cache_path =
|
||||||
|
MultiSession::GetCachePath(kVeryLongPath);
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_EQ(cache_path->size(), MultiSession::kDefaultMaxCachePathLen);
|
||||||
|
EXPECT_TRUE(
|
||||||
|
absl::StrContains(*cache_path, path::Join("GGP", "asset_streaming")))
|
||||||
|
<< *cache_path;
|
||||||
|
// The hash in the end of the path is kept and not shortened.
|
||||||
|
EXPECT_EQ(cache_dir.substr(cache_dir.size() - MultiSession::kDirHashLen),
|
||||||
|
cache_path->substr(cache_path->size() - MultiSession::kDirHashLen));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MultiSessionTest, GetCachePath_DoesNotSplitUtfCodePoints) {
|
||||||
|
// Find out the length of the %APPDATA%\GGP\asset_streaming\" + hash part.
|
||||||
|
absl::StatusOr<std::string> cache_path = MultiSession::GetCachePath("");
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
size_t base_len = cache_path->size();
|
||||||
|
|
||||||
|
// The path has two 2-byte characters. They should not be split in the middle.
|
||||||
|
cache_path = MultiSession::GetCachePath(u8"\u0200\u0200", base_len);
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_EQ(cache_path->size(), base_len);
|
||||||
|
|
||||||
|
// %APPDATA%\GGP\asset_streaming\abcdefg
|
||||||
|
cache_path = MultiSession::GetCachePath(u8"\u0200\u0200", base_len + 1);
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_EQ(cache_path->size(), base_len);
|
||||||
|
|
||||||
|
// %APPDATA%\GGP\asset_streaming\\u0200abcdefg
|
||||||
|
cache_path = MultiSession::GetCachePath(u8"\u0200\u0200", base_len + 2);
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_EQ(cache_path->size(), base_len + 2);
|
||||||
|
|
||||||
|
// %APPDATA%\GGP\asset_streaming\\u0200abcdefg
|
||||||
|
cache_path = MultiSession::GetCachePath(u8"\u0200\u0200", base_len + 3);
|
||||||
|
ASSERT_OK(cache_path);
|
||||||
|
EXPECT_EQ(cache_path->size(), base_len + 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate manifest for an empty directory.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerOnEmpty) {
|
||||||
|
cfg_.src_dir = test_dir_path_;
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
EXPECT_OK(runner.WaitForManifestAck(kInstance, kTimeout));
|
||||||
|
// The first update is always the empty manifest, wait for the second one.
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(2));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckMultiSessionStartRecorded(0, 0, 0);
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kInitUpdateAll,
|
||||||
|
absl::StatusCode::kOk, 0, 0, 0, 0, 0, 0)});
|
||||||
|
|
||||||
|
EXPECT_OK(runner.Status());
|
||||||
|
EXPECT_OK(runner.Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate manifest for a non-empty directory.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerNonEmptySucceeds) {
|
||||||
|
// Contains a.txt, subdir/b.txt, subdir/c.txt, subdir/d.txt.
|
||||||
|
cfg_.src_dir = path::Join(base_dir_, "non_empty");
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
EXPECT_OK(runner.WaitForManifestAck(kInstance, kTimeout));
|
||||||
|
// The first update is always the empty manifest, wait for the second one.
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(2));
|
||||||
|
CheckMultiSessionStartRecorded(46, 4, 4);
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals(
|
||||||
|
{"a.txt", "subdir", "subdir/b.txt", "subdir/c.txt", "subdir/d.txt"},
|
||||||
|
runner.ManifestId()));
|
||||||
|
EXPECT_OK(runner.Status());
|
||||||
|
EXPECT_OK(runner.Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update manifest on adding a file.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerAddFileSucceeds) {
|
||||||
|
cfg_.src_dir = test_dir_path_;
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
EXPECT_OK(runner.WaitForManifestAck(kInstance, kTimeout));
|
||||||
|
// The first update is always the empty manifest, wait for the second one.
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(2));
|
||||||
|
ASSERT_OK(runner.Status());
|
||||||
|
CheckMultiSessionStartRecorded(0, 0, 0);
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kInitUpdateAll,
|
||||||
|
absl::StatusCode::kOk, 0, 0, 0, 0, 0, 0)});
|
||||||
|
|
||||||
|
const std::string file_path = path::Join(test_dir_path_, "file.txt");
|
||||||
|
EXPECT_OK(path::WriteFile(file_path, kData, kDataSize));
|
||||||
|
// 1 file was added = incremented exp_num_manifest_updates.
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(3));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(
|
||||||
|
ExpectManifestEquals({"file.txt"}, runner.ManifestId()));
|
||||||
|
CheckMultiSessionStartNotRecorded();
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kRegularUpdate,
|
||||||
|
absl::StatusCode::kOk, 1, 0, 1, 1, 0, kDataSize)});
|
||||||
|
|
||||||
|
EXPECT_OK(runner.Status());
|
||||||
|
EXPECT_OK(runner.Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fail if the directory does not exist, since watching cannot be started.
// At this point we expect the directory to exist.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerNoDirFails) {
|
||||||
|
cfg_.src_dir = path::Join(base_dir_, "non_existing");
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
|
||||||
|
ASSERT_FALSE(
|
||||||
|
absl::IsNotFound(runner.WaitForManifestAck(kInstance, kTimeout)));
|
||||||
|
ASSERT_FALSE(WaitForManifestUpdated(1, absl::Milliseconds(10)));
|
||||||
|
CheckMultiSessionStartNotRecorded();
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{});
|
||||||
|
EXPECT_NOT_OK(runner.Shutdown());
|
||||||
|
EXPECT_TRUE(absl::StrContains(runner.Status().ToString(),
|
||||||
|
"Could not start watching"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not break if the directory is recreated.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerDirRecreatedSucceeds) {
|
||||||
|
cfg_.src_dir = test_dir_path_;
|
||||||
|
EXPECT_OK(path::WriteFile(path::Join(test_dir_path_, "file.txt"), kData,
|
||||||
|
kDataSize));
|
||||||
|
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
|
||||||
|
{
|
||||||
|
SCOPED_TRACE("Originally, only the streamed directory contains file.txt.");
|
||||||
|
EXPECT_OK(runner.WaitForManifestAck(kInstance, kTimeout));
|
||||||
|
// The first update is always the empty manifest, wait for the second one.
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(2));
|
||||||
|
CheckMultiSessionStartRecorded((uint64_t)kDataSize, 1, 1);
|
||||||
|
ASSERT_NO_FATAL_FAILURE(
|
||||||
|
ExpectManifestEquals({"file.txt"}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(
|
||||||
|
std::vector<metrics::ManifestUpdateData>{GetManifestUpdateData(
|
||||||
|
metrics::UpdateTrigger::kInitUpdateAll, absl::StatusCode::kOk, 1, 0,
|
||||||
|
1, 1, 0, kDataSize)});
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
SCOPED_TRACE(
|
||||||
|
"Remove the streamed directory, the manifest should become empty.");
|
||||||
|
EXPECT_OK(path::RemoveDirRec(test_dir_path_));
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(3));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(
|
||||||
|
std::vector<metrics::ManifestUpdateData>{GetManifestUpdateData(
|
||||||
|
metrics::UpdateTrigger::kRunningUpdateAll,
|
||||||
|
absl::StatusCode::kNotFound, 1, 0, 1, 1, 0, kDataSize)});
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
SCOPED_TRACE(
|
||||||
|
"Create the watched directory -> an empty manifest should be "
|
||||||
|
"streamed.");
|
||||||
|
EXPECT_OK(path::CreateDirRec(test_dir_path_));
|
||||||
|
EXPECT_TRUE(WaitForManifestUpdated(4));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kRunningUpdateAll,
|
||||||
|
absl::StatusCode::kOk, 0, 0, 0, 0, 0, 0)});
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
SCOPED_TRACE("Create 'new_file.txt' -> new manifest should be created.");
|
||||||
|
EXPECT_OK(path::WriteFile(path::Join(test_dir_path_, "new_file.txt"), kData,
|
||||||
|
kDataSize));
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(5));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(
|
||||||
|
ExpectManifestEquals({"new_file.txt"}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(
|
||||||
|
std::vector<metrics::ManifestUpdateData>{GetManifestUpdateData(
|
||||||
|
metrics::UpdateTrigger::kRegularUpdate, absl::StatusCode::kOk, 1, 0,
|
||||||
|
1, 1, 0, kDataSize)});
|
||||||
|
CheckMultiSessionStartNotRecorded();
|
||||||
|
}
|
||||||
|
|
||||||
|
EXPECT_OK(runner.Status());
|
||||||
|
EXPECT_OK(runner.Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fail if the streamed source is a file.
|
||||||
|
TEST_F(MultiSessionTest, MultiSessionRunnerFileAsStreamedDirFails) {
|
||||||
|
cfg_.src_dir = path::Join(test_dir_path_, "file.txt");
|
||||||
|
EXPECT_OK(path::WriteFile(cfg_.src_dir, kData, kDataSize));
|
||||||
|
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
ASSERT_FALSE(WaitForManifestUpdated(1, absl::Milliseconds(10)));
|
||||||
|
CheckMultiSessionStartNotRecorded();
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{});
|
||||||
|
EXPECT_NOT_OK(runner.Shutdown());
|
||||||
|
EXPECT_TRUE(absl::StrContains(runner.Status().ToString(),
|
||||||
|
"Failed to update manifest"))
|
||||||
|
<< runner.Status().ToString();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream an empty manifest if the streamed directory was re-created as a file.
|
||||||
|
TEST_F(MultiSessionTest,
|
||||||
|
MultiSessionRunnerDirRecreatedAsFileSucceedsWithEmptyManifest) {
|
||||||
|
cfg_.src_dir = path::Join(test_dir_path_, "file");
|
||||||
|
EXPECT_OK(path::CreateDirRec(cfg_.src_dir));
|
||||||
|
|
||||||
|
MultiSessionRunner runner(cfg_.src_dir, &data_store_, &process_factory_,
|
||||||
|
false /*enable_stats*/, kTimeout, kNumThreads,
|
||||||
|
metrics_service_,
|
||||||
|
[this]() { OnManifestUpdated(); });
|
||||||
|
EXPECT_OK(runner.Initialize(kPort, AssetStreamServerType::kTest));
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(2));
|
||||||
|
CheckMultiSessionStartRecorded(0, 0, 0);
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kInitUpdateAll,
|
||||||
|
absl::StatusCode::kOk, 0, 0, 0, 0, 0, 0)});
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
|
||||||
|
// Remove the streamed directory, the manifest should become empty.
|
||||||
|
EXPECT_OK(path::RemoveDirRec(cfg_.src_dir));
|
||||||
|
ASSERT_TRUE(WaitForManifestUpdated(3));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(std::vector<metrics::ManifestUpdateData>{
|
||||||
|
GetManifestUpdateData(metrics::UpdateTrigger::kRunningUpdateAll,
|
||||||
|
absl::StatusCode::kNotFound, 0, 0, 0, 0, 0, 0)});
|
||||||
|
|
||||||
|
EXPECT_OK(path::WriteFile(cfg_.src_dir, kData, kDataSize));
|
||||||
|
EXPECT_TRUE(WaitForManifestUpdated(4));
|
||||||
|
ASSERT_NO_FATAL_FAILURE(ExpectManifestEquals({}, runner.ManifestId()));
|
||||||
|
CheckManifestUpdateRecorded(
|
||||||
|
std::vector<metrics::ManifestUpdateData>{GetManifestUpdateData(
|
||||||
|
metrics::UpdateTrigger::kRunningUpdateAll,
|
||||||
|
absl::StatusCode::kFailedPrecondition, 0, 0, 0, 0, 0, 0)});
|
||||||
|
CheckMultiSessionStartNotRecorded();
|
||||||
|
|
||||||
|
EXPECT_OK(runner.Status());
|
||||||
|
EXPECT_OK(runner.Shutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace cdc_ft
|
||||||
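The tests above repeatedly block on WaitForManifestUpdated(n) until the n-th manifest update callback has fired. Below is a minimal standalone sketch of such a counter-based wait; the class and method names are illustrative and not part of the test fixture.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative sketch only (names are hypothetical, not from the fixture):
// a counter bumped by an OnManifestUpdated()-style callback plus a bounded
// wait for the counter to reach a target value.
class UpdateCounter {
 public:
  void Increment() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_all();
  }

  // Returns true if at least |target| updates were observed within |timeout|.
  bool WaitForCount(int target, std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lock(mutex_);
    return cv_.wait_for(lock, timeout, [&] { return count_ >= target; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};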
131
asset_stream_manager/session.cc
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/session.h"
|
||||||
|
|
||||||
|
#include "asset_stream_manager/cdc_fuse_manager.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/port_manager.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "metrics/enums.h"
|
||||||
|
#include "metrics/messages.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Timeout for initial gamelet connection.
|
||||||
|
constexpr double kInstanceConnectionTimeoutSec = 10.0f;
|
||||||
|
|
||||||
|
metrics::DeveloperLogEvent GetEventWithHeartBeatData(size_t bytes,
|
||||||
|
size_t chunks) {
|
||||||
|
metrics::DeveloperLogEvent evt;
|
||||||
|
evt.as_manager_data = std::make_unique<metrics::AssetStreamingManagerData>();
|
||||||
|
evt.as_manager_data->session_data = std::make_unique<metrics::SessionData>();
|
||||||
|
evt.as_manager_data->session_data->byte_count = bytes;
|
||||||
|
evt.as_manager_data->session_data->chunk_count = chunks;
|
||||||
|
return std::move(evt);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
Session::Session(std::string instance_id, std::string instance_ip,
|
||||||
|
uint16_t instance_port, SessionConfig cfg,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
std::unique_ptr<SessionMetricsRecorder> metrics_recorder)
|
||||||
|
: instance_id_(std::move(instance_id)),
|
||||||
|
cfg_(std::move(cfg)),
|
||||||
|
process_factory_(process_factory),
|
||||||
|
remote_util_(cfg_.verbosity, cfg_.quiet, process_factory,
|
||||||
|
/*forward_output_to_logging=*/true),
|
||||||
|
metrics_recorder_(std::move(metrics_recorder)) {
|
||||||
|
assert(metrics_recorder_);
|
||||||
|
remote_util_.SetIpAndPort(instance_ip, instance_port);
|
||||||
|
}
|
||||||
|
|
||||||
|
Session::~Session() {
|
||||||
|
absl::Status status = Stop();
|
||||||
|
if (!status.ok()) {
|
||||||
|
LOG_ERROR("Failed to stop session for instance '%s': %s", instance_id_,
|
||||||
|
status.ToString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Session::Start(int local_port, int first_remote_port,
|
||||||
|
int last_remote_port) {
|
||||||
|
// Find an available remote port.
|
||||||
|
std::unordered_set<int> ports;
|
||||||
|
ASSIGN_OR_RETURN(
|
||||||
|
ports,
|
||||||
|
PortManager::FindAvailableRemotePorts(
|
||||||
|
first_remote_port, last_remote_port, "127.0.0.1", process_factory_,
|
||||||
|
&remote_util_, kInstanceConnectionTimeoutSec, true),
|
||||||
|
"Failed to find an available remote port in the range [%d, %d]",
|
||||||
|
first_remote_port, last_remote_port);
|
||||||
|
assert(!ports.empty());
|
||||||
|
int remote_port = *ports.begin();
|
||||||
|
|
||||||
|
assert(!fuse_);
|
||||||
|
fuse_ = std::make_unique<CdcFuseManager>(instance_id_, process_factory_,
|
||||||
|
&remote_util_);
|
||||||
|
RETURN_IF_ERROR(
|
||||||
|
fuse_->Start(local_port, remote_port, cfg_.verbosity, cfg_.fuse_debug,
|
||||||
|
cfg_.fuse_singlethreaded, cfg_.stats, cfg_.fuse_check,
|
||||||
|
cfg_.fuse_cache_capacity, cfg_.fuse_cleanup_timeout_sec,
|
||||||
|
cfg_.fuse_access_idle_timeout_sec),
|
||||||
|
"Failed to start instance component");
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Session::Stop() {
|
||||||
|
absl::ReaderMutexLock lock(&transferred_data_mu_);
|
||||||
|
// TODO: Record error on session end.
|
||||||
|
metrics_recorder_->RecordEvent(
|
||||||
|
GetEventWithHeartBeatData(transferred_bytes_, transferred_chunks_),
|
||||||
|
metrics::EventType::kSessionEnd);
|
||||||
|
if (fuse_) {
|
||||||
|
RETURN_IF_ERROR(fuse_->Stop());
|
||||||
|
fuse_.reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Session::IsHealthy() { return fuse_->IsHealthy(); }
|
||||||
|
|
||||||
|
void Session::RecordEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code) const {
|
||||||
|
metrics_recorder_->RecordEvent(std::move(event), code);
|
||||||
|
}
|
||||||
|
|
||||||
|
void Session::OnContentSent(size_t bytes, size_t chunks) {
|
||||||
|
absl::WriterMutexLock lock(&transferred_data_mu_);
|
||||||
|
transferred_bytes_ += bytes;
|
||||||
|
transferred_chunks_ += chunks;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Session::RecordHeartBeatIfChanged() {
|
||||||
|
absl::ReaderMutexLock lock(&transferred_data_mu_);
|
||||||
|
if (transferred_bytes_ == last_read_bytes_ &&
|
||||||
|
transferred_chunks_ == last_read_chunks_) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
last_read_bytes_ = transferred_bytes_;
|
||||||
|
last_read_chunks_ = transferred_chunks_;
|
||||||
|
metrics_recorder_->RecordEvent(
|
||||||
|
GetEventWithHeartBeatData(last_read_bytes_, last_read_chunks_),
|
||||||
|
metrics::EventType::kSessionHeartBeat);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
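Session::RecordHeartBeatIfChanged() above only emits a heartbeat metrics event when the transferred byte and chunk counters have moved since the last report. The following is a minimal standalone sketch of that change-detection pattern, using std::mutex instead of absl::Mutex; the names are illustrative, not the project's API.

#include <cstdint>
#include <mutex>

// Sketch of the "report only on change" pattern used by
// RecordHeartBeatIfChanged(). Names are illustrative.
class TransferStats {
 public:
  void OnContentSent(uint64_t bytes, uint64_t chunks) {
    std::lock_guard<std::mutex> lock(mutex_);
    transferred_bytes_ += bytes;
    transferred_chunks_ += chunks;
  }

  // Invokes |report| only if the counters changed since the last call.
  template <typename Report>
  void ReportIfChanged(Report report) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (transferred_bytes_ == last_bytes_ &&
        transferred_chunks_ == last_chunks_) {
      return;
    }
    last_bytes_ = transferred_bytes_;
    last_chunks_ = transferred_chunks_;
    report(last_bytes_, last_chunks_);
  }

 private:
  std::mutex mutex_;
  uint64_t transferred_bytes_ = 0;
  uint64_t transferred_chunks_ = 0;
  uint64_t last_bytes_ = 0;
  uint64_t last_chunks_ = 0;
};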
90
asset_stream_manager/session.h
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_SESSION_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_SESSION_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "asset_stream_manager/metrics_recorder.h"
|
||||||
|
#include "asset_stream_manager/session_config.h"
|
||||||
|
#include "common/remote_util.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class CdcFuseManager;
|
||||||
|
class ProcessFactory;
|
||||||
|
class Process;
|
||||||
|
|
||||||
|
// Manages the connection of a workstation to a single gamelet.
|
||||||
|
class Session {
|
||||||
|
public:
|
||||||
|
// |instance_id| is a unique id for the remote instance.
|
||||||
|
// |instance_ip| is the IP address of the remote instance.
|
||||||
|
// |instance_port| is the SSH tunnel port for connecting to the instance.
|
||||||
|
// |cfg| contains generic configuration parameters for the session.
|
||||||
|
// |process_factory| abstracts process creation.
|
||||||
|
Session(std::string instance_id, std::string instance_ip,
|
||||||
|
uint16_t instance_port, SessionConfig cfg,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
std::unique_ptr<SessionMetricsRecorder> metrics_recorder);
|
||||||
|
~Session();
|
||||||
|
|
||||||
|
// Starts the CDC FUSE on the instance with established port forwarding.
|
||||||
|
// |local_port| is the local reverse forwarding port to use.
|
||||||
|
// [|first_remote_port|, |last_remote_port|] are the allowed remote ports.
|
||||||
|
absl::Status Start(int local_port, int first_remote_port,
|
||||||
|
int last_remote_port);
|
||||||
|
|
||||||
|
// Shuts down the connection to the instance.
|
||||||
|
absl::Status Stop() ABSL_LOCKS_EXCLUDED(transferred_data_mu_);
|
||||||
|
|
||||||
|
// Returns true if the FUSE process is running.
|
||||||
|
bool IsHealthy();
|
||||||
|
|
||||||
|
// Record an event for the session.
|
||||||
|
void RecordEvent(metrics::DeveloperLogEvent event,
|
||||||
|
metrics::EventType code) const;
|
||||||
|
|
||||||
|
// Is called when content was sent during the session.
|
||||||
|
void OnContentSent(size_t bytes, size_t chunks)
|
||||||
|
ABSL_LOCKS_EXCLUDED(transferred_data_mu_);
|
||||||
|
|
||||||
|
// Records heart beat data if it has changed since last record.
|
||||||
|
void RecordHeartBeatIfChanged() ABSL_LOCKS_EXCLUDED(transferred_data_mu_);
|
||||||
|
|
||||||
|
private:
|
||||||
|
const std::string instance_id_;
|
||||||
|
const SessionConfig cfg_;
|
||||||
|
ProcessFactory* const process_factory_;
|
||||||
|
|
||||||
|
RemoteUtil remote_util_;
|
||||||
|
std::unique_ptr<CdcFuseManager> fuse_;
|
||||||
|
std::unique_ptr<SessionMetricsRecorder> metrics_recorder_;
|
||||||
|
|
||||||
|
absl::Mutex transferred_data_mu_;
|
||||||
|
uint64_t transferred_bytes_ ABSL_GUARDED_BY(transferred_data_mu_) = 0;
|
||||||
|
uint64_t transferred_chunks_ ABSL_GUARDED_BY(transferred_data_mu_) = 0;
|
||||||
|
uint64_t last_read_bytes_ = 0;
|
||||||
|
uint64_t last_read_chunks_ = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_SESSION_H_
|
||||||
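Putting the Session interface above together, a hedged usage sketch follows. The instance id, IP, and port numbers are placeholders, and the ProcessFactory and SessionMetricsRecorder are assumed to be supplied by the caller.

#include <memory>
#include <string>

#include "absl/status/status.h"
#include "asset_stream_manager/session.h"
#include "asset_stream_manager/session_config.h"

// Usage sketch only; all concrete values below are placeholders.
absl::Status RunSession(
    cdc_ft::ProcessFactory* process_factory,
    std::unique_ptr<cdc_ft::SessionMetricsRecorder> metrics_recorder,
    const cdc_ft::SessionConfig& cfg) {
  cdc_ft::Session session("instance-id", "203.0.113.10", /*instance_port=*/22,
                          cfg, process_factory, std::move(metrics_recorder));
  // Reverse-forward local port 45045 and pick a remote port in [45050, 45060].
  absl::Status status =
      session.Start(/*local_port=*/45045, /*first_remote_port=*/45050,
                    /*last_remote_port=*/45060);
  if (!status.ok()) return status;
  // ... stream assets while the session is healthy ...
  return session.Stop();
}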
63
asset_stream_manager/session_config.h
Normal file
@@ -0,0 +1,63 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ASSET_STREAM_MANAGER_SESSION_CONFIG_H_
#define ASSET_STREAM_MANAGER_SESSION_CONFIG_H_

#include <cstdint>

namespace cdc_ft {

// The values set in this config do not necessarily denote the default values.
// For the defaults, see the corresponding flag values.
struct SessionConfig {
  // General log verbosity.
  int verbosity = 0;

  // Silence logs from process execution.
  bool quiet = false;

  // Print detailed streaming stats.
  bool stats = false;

  // Whether to run FUSE in debug mode.
  bool fuse_debug = false;

  // Whether to run FUSE in single-threaded mode.
  bool fuse_singlethreaded = false;

  // Whether to run FUSE consistency check.
  bool fuse_check = false;

  // Cache capacity with a suffix.
  uint64_t fuse_cache_capacity = 0;

  // Cleanup timeout in seconds.
  uint32_t fuse_cleanup_timeout_sec = 0;

  // Access idling timeout in seconds.
  uint32_t fuse_access_idle_timeout_sec = 0;

  // Number of threads used in the manifest updater to compute chunks/hashes.
  uint32_t manifest_updater_threads = 0;

  // Time to wait until running a manifest update after detecting a file change.
  uint32_t file_change_wait_duration_ms = 0;
};

} // namespace cdc_ft

#endif // ASSET_STREAM_MANAGER_SESSION_CONFIG_H_
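As the header notes, the members above do not carry the flag defaults themselves; callers fill them in, typically from command-line flags. A small sketch of populating a SessionConfig follows; the values are placeholders, not the real defaults.

#include "asset_stream_manager/session_config.h"

namespace {

// Illustrative only: the values below are placeholders, not the actual
// flag defaults used by asset_stream_manager.
cdc_ft::SessionConfig MakeExampleConfig() {
  cdc_ft::SessionConfig cfg;
  cfg.verbosity = 2;                       // More verbose logging.
  cfg.stats = true;                        // Print streaming stats.
  cfg.fuse_cache_capacity = 512ull << 20;  // 512 MiB data cache.
  cfg.manifest_updater_threads = 4;        // Hashing worker threads.
  cfg.file_change_wait_duration_ms = 500;  // Debounce file change events.
  return cfg;
}

} // namespace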
76
asset_stream_manager/session_management_server.cc
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/session_management_server.h"
|
||||||
|
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "asset_stream_manager/background_service_impl.h"
|
||||||
|
#include "asset_stream_manager/local_assets_stream_manager_service_impl.h"
|
||||||
|
#include "asset_stream_manager/session_manager.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "grpcpp/grpcpp.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
SessionManagementServer::SessionManagementServer(
|
||||||
|
grpc::Service* session_service, grpc::Service* background_service,
|
||||||
|
SessionManager* session_manager)
|
||||||
|
: session_service_(session_service),
|
||||||
|
background_service_(background_service),
|
||||||
|
session_manager_(session_manager) {}
|
||||||
|
|
||||||
|
SessionManagementServer::~SessionManagementServer() = default;
|
||||||
|
|
||||||
|
absl::Status SessionManagementServer::Start(int port) {
|
||||||
|
assert(!server_);
|
||||||
|
|
||||||
|
// Use 127.0.0.1 here to enforce IPv4. Otherwise, if only IPv4 is blocked on
|
||||||
|
// |port|, the server is started with IPv6 only, but clients are connecting
|
||||||
|
// with IPv4.
|
||||||
|
int selected_port = 0;
|
||||||
|
std::string server_address = absl::StrFormat("127.0.0.1:%i", port);
|
||||||
|
grpc::ServerBuilder builder;
|
||||||
|
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials(),
|
||||||
|
&selected_port);
|
||||||
|
builder.RegisterService(session_service_);
|
||||||
|
builder.RegisterService(background_service_);
|
||||||
|
server_ = builder.BuildAndStart();
|
||||||
|
if (selected_port != port) {
|
||||||
|
return MakeStatus(
|
||||||
|
"Failed to start session management server: Could not listen on port "
|
||||||
|
"%i. Is the port in use?",
|
||||||
|
port);
|
||||||
|
}
|
||||||
|
if (!server_) {
|
||||||
|
return MakeStatus(
|
||||||
|
"Failed to start session management server. Check asset_stream_manager "
|
||||||
|
"logs.");
|
||||||
|
}
|
||||||
|
LOG_INFO("Session management server listening on '%s'", server_address);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
void SessionManagementServer::RunUntilShutdown() { server_->Wait(); }
|
||||||
|
|
||||||
|
absl::Status SessionManagementServer::Shutdown() {
|
||||||
|
assert(server_);
|
||||||
|
RETURN_IF_ERROR(session_manager_->Shutdown(),
|
||||||
|
"Failed to shut down session manager");
|
||||||
|
server_->Shutdown();
|
||||||
|
server_->Wait();
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
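Start() above detects a failed bind by comparing the port gRPC actually bound (written into the out-parameter of AddListeningPort) against the requested port. The sketch below isolates that pattern; the helper name is hypothetical.

#include <memory>
#include <string>

#include "absl/strings/str_format.h"
#include "grpcpp/grpcpp.h"

// Hypothetical helper illustrating the port-verification pattern used by
// SessionManagementServer::Start(). It binds to IPv4 localhost only and
// reports whether the requested port was actually acquired.
std::unique_ptr<grpc::Server> StartLocalServer(int port, bool* ok) {
  int selected_port = 0;
  std::string address = absl::StrFormat("127.0.0.1:%i", port);
  grpc::ServerBuilder builder;
  builder.AddListeningPort(address, grpc::InsecureServerCredentials(),
                           &selected_port);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  // gRPC writes the bound port into |selected_port|; a mismatch (or a null
  // server) means the port could not be acquired.
  *ok = server != nullptr && selected_port == port;
  return server;
}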
63
asset_stream_manager/session_management_server.h
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_SESSION_MANAGEMENT_SERVER_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_SESSION_MANAGEMENT_SERVER_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
|
||||||
|
namespace grpc {
|
||||||
|
class Server;
|
||||||
|
class Service;
|
||||||
|
} // namespace grpc
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class SessionManager;
|
||||||
|
class ProcessFactory;
|
||||||
|
|
||||||
|
// gRPC server for managing streaming sessions. Contains these services:
|
||||||
|
// - LocalAssetsStreamManager
|
||||||
|
// - Background
|
||||||
|
class SessionManagementServer {
|
||||||
|
public:
|
||||||
|
SessionManagementServer(grpc::Service* session_service,
|
||||||
|
grpc::Service* background_service,
|
||||||
|
SessionManager* session_manager);
|
||||||
|
~SessionManagementServer();
|
||||||
|
|
||||||
|
// Starts the server on the local port |port|.
|
||||||
|
absl::Status Start(int port);
|
||||||
|
|
||||||
|
// Waits until ProcessManager issues an Exit() request to the background
|
||||||
|
// service.
|
||||||
|
void RunUntilShutdown();
|
||||||
|
|
||||||
|
// Shuts down the session manager and the server.
|
||||||
|
absl::Status Shutdown();
|
||||||
|
|
||||||
|
private:
|
||||||
|
grpc::Service* session_service_;
|
||||||
|
grpc::Service* background_service_;
|
||||||
|
SessionManager* const session_manager_;
|
||||||
|
std::unique_ptr<grpc::Server> server_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_SESSION_MANAGEMENT_SERVER_H_
|
||||||
193
asset_stream_manager/session_manager.cc
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "asset_stream_manager/session_manager.h"
|
||||||
|
|
||||||
|
#include "absl/strings/str_split.h"
|
||||||
|
#include "asset_stream_manager/multi_session.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/process.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "manifest/manifest_updater.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Returns a key to uniquely map a streaming directory |src_dir| to a
|
||||||
|
// MultiSession instance.
|
||||||
|
std::string GetMultiSessionKey(const std::string src_dir) {
|
||||||
|
// Use the cache dir as a key to identify MultiSessions. That way, different
|
||||||
|
// representations of the same dir (e.g. dir and dir\) map to the same
|
||||||
|
// MultiSession.
|
||||||
|
return MultiSession::GetCacheDir(src_dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
SessionManager::SessionManager(SessionConfig cfg,
|
||||||
|
ProcessFactory* process_factory,
|
||||||
|
metrics::MetricsService* metrics_service)
|
||||||
|
: cfg_(cfg),
|
||||||
|
process_factory_(process_factory),
|
||||||
|
metrics_service_(metrics_service) {}
|
||||||
|
|
||||||
|
SessionManager::~SessionManager() = default;
|
||||||
|
|
||||||
|
absl::Status SessionManager::Shutdown() {
|
||||||
|
absl::MutexLock lock(&sessions_mutex_);
|
||||||
|
|
||||||
|
for (const auto& [key, ms] : sessions_) {
|
||||||
|
LOG_INFO("Shutting down MultiSession for path '%s'", ms->src_dir());
|
||||||
|
RETURN_IF_ERROR(ms->Shutdown(),
|
||||||
|
"Failed to shut down MultiSession for path '%s'",
|
||||||
|
ms->src_dir());
|
||||||
|
}
|
||||||
|
sessions_.clear();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status SessionManager::StartSession(
|
||||||
|
const std::string& instance_id, const std::string& project_id,
|
||||||
|
const std::string& organization_id, const std::string& instance_ip,
|
||||||
|
uint16_t instance_port, const std::string& src_dir,
|
||||||
|
MultiSession** multi_session, metrics::SessionStartStatus* metrics_status) {
|
||||||
|
*multi_session = nullptr;
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kOk;
|
||||||
|
|
||||||
|
absl::MutexLock lock(&sessions_mutex_);
|
||||||
|
|
||||||
|
// Check if the directory is correct as early as possible.
|
||||||
|
absl::Status status = ManifestUpdater::IsValidDir(src_dir);
|
||||||
|
if (!status.ok()) {
|
||||||
|
absl::Status stop_status = StopSessionInternal(instance_id);
|
||||||
|
if (!stop_status.ok() && !absl::IsNotFound(stop_status)) {
|
||||||
|
LOG_ERROR("Failed to stop previous session for instance '%s': '%s'",
|
||||||
|
instance_id, stop_status.ToString());
|
||||||
|
}
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kInvalidDirError;
|
||||||
|
return WrapStatus(status, "Failed to start session for path '%s'", src_dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Early out if we are streaming the workstation dir to the given gamelet.
|
||||||
|
MultiSession* ms = GetMultiSession(src_dir);
|
||||||
|
*multi_session = ms;
|
||||||
|
if (ms && ms->HasSessionForInstance(instance_id)) {
|
||||||
|
if (ms->IsSessionHealthy(instance_id)) {
|
||||||
|
LOG_INFO("Reusing existing session");
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("Existing session for instance '%s' is not healthy. Restarting.",
|
||||||
|
instance_id);
|
||||||
|
|
||||||
|
// We could also fall through, but this might restart the MultiSession.
|
||||||
|
status = ms->StopSession(instance_id);
|
||||||
|
if (status.ok()) {
|
||||||
|
status = ms->StartSession(instance_id, project_id, organization_id,
|
||||||
|
instance_ip, instance_port);
|
||||||
|
}
|
||||||
|
if (!status.ok()) {
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kRestartSessionError;
|
||||||
|
}
|
||||||
|
return WrapStatus(status, "Failed to restart session for instance '%s'",
|
||||||
|
instance_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we are already streaming to the given gamelet, but from another
|
||||||
|
// workstation directory, stop that session.
|
||||||
|
// Note that NotFoundError is OK and expected (it means no session exists).
|
||||||
|
status = StopSessionInternal(instance_id);
|
||||||
|
if (!status.ok() && !absl::IsNotFound(status)) {
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kStopSessionError;
|
||||||
|
return WrapStatus(status,
|
||||||
|
"Failed to stop previous session for instance '%s'",
|
||||||
|
instance_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get or create the MultiSession for the given workstation directory.
|
||||||
|
absl::StatusOr<MultiSession*> ms_res = GetOrCreateMultiSession(src_dir);
|
||||||
|
if (!ms_res.ok()) {
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kCreateMultiSessionError;
|
||||||
|
return WrapStatus(ms_res.status(),
|
||||||
|
"Failed to create MultiSession for path '%s'", src_dir);
|
||||||
|
}
|
||||||
|
ms = ms_res.value();
|
||||||
|
*multi_session = ms;
|
||||||
|
|
||||||
|
// Start the session.
|
||||||
|
LOG_INFO("Starting streaming session from path '%s' to instance '%s'",
|
||||||
|
src_dir, instance_id);
|
||||||
|
status = ms->StartSession(instance_id, project_id, organization_id,
|
||||||
|
instance_ip, instance_port);
|
||||||
|
if (!status.ok()) {
|
||||||
|
*metrics_status = metrics::SessionStartStatus::kStartSessionError;
|
||||||
|
}
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status SessionManager::StopSession(const std::string& instance_id) {
|
||||||
|
absl::MutexLock lock(&sessions_mutex_);
|
||||||
|
return StopSessionInternal(instance_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
MultiSession* SessionManager::GetMultiSession(const std::string& src_dir) {
|
||||||
|
const std::string key = GetMultiSessionKey(src_dir);
|
||||||
|
SessionMap::iterator iter = sessions_.find(key);
|
||||||
|
return iter != sessions_.end() ? iter->second.get() : nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<MultiSession*> SessionManager::GetOrCreateMultiSession(
|
||||||
|
const std::string& src_dir) {
|
||||||
|
const std::string key = GetMultiSessionKey(src_dir);
|
||||||
|
SessionMap::iterator iter = sessions_.find(key);
|
||||||
|
if (iter == sessions_.end()) {
|
||||||
|
LOG_INFO("Creating new MultiSession for path '%s'", src_dir);
|
||||||
|
auto ms = std::make_unique<MultiSession>(
|
||||||
|
src_dir, cfg_, process_factory_,
|
||||||
|
new MultiSessionMetricsRecorder(metrics_service_));
|
||||||
|
RETURN_IF_ERROR(ms->Initialize(), "Failed to initialize MultiSession");
|
||||||
|
iter = sessions_.insert({key, std::move(ms)}).first;
|
||||||
|
}
|
||||||
|
|
||||||
|
return iter->second.get();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status SessionManager::StopSessionInternal(const std::string& instance) {
|
||||||
|
absl::Status status;
|
||||||
|
for (const auto& [key, ms] : sessions_) {
|
||||||
|
if (!ms->HasSessionForInstance(instance)) continue;
|
||||||
|
|
||||||
|
LOG_INFO("Stopping session streaming from '%s' to instance '%s'",
|
||||||
|
ms->src_dir(), instance);
|
||||||
|
RETURN_IF_ERROR(ms->StopSession(instance),
|
||||||
|
"Failed to stop session for instance '%s'", instance);
|
||||||
|
|
||||||
|
// Session was stopped. If the MultiSession is empty now, delete it.
|
||||||
|
if (ms->Empty()) {
|
||||||
|
LOG_INFO("Shutting down MultiSession for path '%s'", ms->src_dir());
|
||||||
|
RETURN_IF_ERROR(ms->Shutdown(),
|
||||||
|
"Failed to shut down MultiSession for path '%s'",
|
||||||
|
ms->src_dir());
|
||||||
|
sessions_.erase(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::NotFoundError(
|
||||||
|
absl::StrFormat("No session for instance id '%s' found", instance));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
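GetOrCreateMultiSession() above keys the session map by MultiSession::GetCacheDir(src_dir) so that different spellings of the same directory resolve to one MultiSession. A standalone sketch of that keyed get-or-create pattern follows; the Normalize() helper is a simplified stand-in, not the project's GetCacheDir() logic.

#include <memory>
#include <string>
#include <unordered_map>

// Illustrative stand-in for a streamed directory.
struct Stream {
  explicit Stream(std::string dir) : dir(std::move(dir)) {}
  std::string dir;
};

// Keyed get-or-create registry, as used for MultiSessions.
class StreamRegistry {
 public:
  Stream* GetOrCreate(const std::string& src_dir) {
    const std::string key = Normalize(src_dir);
    auto iter = streams_.find(key);
    if (iter == streams_.end()) {
      iter = streams_.emplace(key, std::make_unique<Stream>(src_dir)).first;
    }
    return iter->second.get();
  }

 private:
  // Simplistic normalization: strip trailing separators so "dir" and "dir\"
  // map to the same key. The real code derives a cache directory instead.
  static std::string Normalize(std::string dir) {
    while (!dir.empty() && (dir.back() == '/' || dir.back() == '\\')) {
      dir.pop_back();
    }
    return dir;
  }

  std::unordered_map<std::string, std::unique_ptr<Stream>> streams_;
};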
100
asset_stream_manager/session_manager.h
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_SESSION_MANAGER_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_SESSION_MANAGER_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/status/statusor.h"
|
||||||
|
#include "absl/synchronization/mutex.h"
|
||||||
|
#include "asset_stream_manager/session_config.h"
|
||||||
|
#include "metrics/metrics.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class MultiSession;
|
||||||
|
class ProcessFactory;
|
||||||
|
|
||||||
|
// Implements a service to start and stop streaming sessions as a server.
|
||||||
|
// The corresponding clients are implemented by the ggp CLI and SDK Proxy.
|
||||||
|
// The CLI triggers StartSession() from `ggp instance mount --local-dir` and
|
||||||
|
// StopSession() from `ggp instance unmount`. SDK Proxy invokes StartSession()
|
||||||
|
// when a user starts a new game from the partner portal and sets an `Asset
|
||||||
|
// streaming directory` in the `Advanced settings` in the `Play settings`
|
||||||
|
// dialog.
|
||||||
|
// This service is owned by SessionManagementServer.
|
||||||
|
class SessionManager {
|
||||||
|
public:
|
||||||
|
SessionManager(SessionConfig cfg, ProcessFactory* process_factory,
|
||||||
|
metrics::MetricsService* metrics_service);
|
||||||
|
~SessionManager();
|
||||||
|
|
||||||
|
// Starts a session and populates |multi_session| and |metrics_status|.
|
||||||
|
absl::Status StartSession(const std::string& instance_id,
|
||||||
|
const std::string& project_id,
|
||||||
|
const std::string& organization_id,
|
||||||
|
const std::string& instance_ip,
|
||||||
|
uint16_t instance_port, const std::string& src_dir,
|
||||||
|
MultiSession** multi_session,
|
||||||
|
metrics::SessionStartStatus* metrics_status)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Stops the session for the given |instance|. Returns a NotFound error if no
|
||||||
|
// session exists.
|
||||||
|
absl::Status StopSession(const std::string& instance)
|
||||||
|
ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Shuts down all existing MultiSessions.
|
||||||
|
absl::Status Shutdown() ABSL_LOCKS_EXCLUDED(sessions_mutex_);
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Stops the session for the given |instance|. Returns a NotFound error if no
|
||||||
|
// session exists.
|
||||||
|
absl::Status StopSessionInternal(const std::string& instance)
|
||||||
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Returns the MultiSession for the given workstation directory |src_dir| or
|
||||||
|
// nullptr if it does not exist.
|
||||||
|
MultiSession* GetMultiSession(const std::string& src_dir)
|
||||||
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Gets an existing MultiSession or creates a new one for the given
|
||||||
|
// workstation directory |src_dir|.
|
||||||
|
absl::StatusOr<MultiSession*> GetOrCreateMultiSession(
|
||||||
|
const std::string& src_dir)
|
||||||
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(sessions_mutex_);
|
||||||
|
|
||||||
|
// Sets session start status for a metrics event.
|
||||||
|
void SetSessionStartStatus(metrics::DeveloperLogEvent* evt,
|
||||||
|
absl::Status absl_status,
|
||||||
|
metrics::SessionStartStatus status) const;
|
||||||
|
|
||||||
|
const SessionConfig cfg_;
|
||||||
|
ProcessFactory* const process_factory_;
|
||||||
|
metrics::MetricsService* const metrics_service_;
|
||||||
|
|
||||||
|
absl::Mutex sessions_mutex_;
|
||||||
|
using SessionMap =
|
||||||
|
std::unordered_map<std::string, std::unique_ptr<MultiSession>>;
|
||||||
|
SessionMap sessions_ ABSL_GUARDED_BY(sessions_mutex_);
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_SESSION_MANAGER_H_
|
||||||
1
asset_stream_manager/testdata/multi_session/non_empty/a.txt
vendored
Normal file
@@ -0,0 +1 @@
aaaaaaaa
1
asset_stream_manager/testdata/multi_session/non_empty/subdir/b.txt
vendored
Normal file
@@ -0,0 +1 @@
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
1
asset_stream_manager/testdata/multi_session/non_empty/subdir/c.txt
vendored
Normal file
@@ -0,0 +1 @@
c
1
asset_stream_manager/testdata/multi_session/non_empty/subdir/d.txt
vendored
Normal file
@@ -0,0 +1 @@
d
0
asset_stream_manager/testdata/root.txt
vendored
Normal file
50
asset_stream_manager/testing_asset_stream_server.cc
Normal file
@@ -0,0 +1,50 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "asset_stream_manager/testing_asset_stream_server.h"

#include "data_store/data_store_reader.h"
#include "manifest/file_chunk_map.h"

namespace cdc_ft {

TestingAssetStreamServer::TestingAssetStreamServer(
    std::string src_dir, DataStoreReader* data_store_reader,
    FileChunkMap* file_chunks)
    : AssetStreamServer(src_dir, data_store_reader, file_chunks) {}

TestingAssetStreamServer::~TestingAssetStreamServer() = default;

absl::Status TestingAssetStreamServer::Start(int port) {
  return absl::OkStatus();
}

void TestingAssetStreamServer::SetManifestId(
    const ContentIdProto& manifest_id) {
  absl::MutexLock lock(&mutex_);
  manifest_id_ = manifest_id;
}

absl::Status TestingAssetStreamServer::WaitForManifestAck(
    const std::string& instance, absl::Duration timeout) {
  return absl::OkStatus();
}

void TestingAssetStreamServer::Shutdown() {}

ContentIdProto TestingAssetStreamServer::GetManifestId() const {
  absl::MutexLock lock(&mutex_);
  return manifest_id_;
}
} // namespace cdc_ft
60
asset_stream_manager/testing_asset_stream_server.h
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ASSET_STREAM_MANAGER_TESTING_ASSET_STREAM_SERVER_H_
|
||||||
|
#define ASSET_STREAM_MANAGER_TESTING_ASSET_STREAM_SERVER_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "absl/base/thread_annotations.h"
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/synchronization/mutex.h"
|
||||||
|
#include "asset_stream_manager/grpc_asset_stream_server.h"
|
||||||
|
#include "manifest/manifest_proto_defs.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Not thread-safe testing server for streaming assets.
|
||||||
|
class TestingAssetStreamServer : public AssetStreamServer {
|
||||||
|
public:
|
||||||
|
TestingAssetStreamServer(std::string src_dir,
|
||||||
|
DataStoreReader* data_store_reader,
|
||||||
|
FileChunkMap* file_chunks);
|
||||||
|
|
||||||
|
~TestingAssetStreamServer();
|
||||||
|
|
||||||
|
// AssetStreamServer:
|
||||||
|
|
||||||
|
absl::Status Start(int port) override;
|
||||||
|
|
||||||
|
void SetManifestId(const ContentIdProto& manifest_id)
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_) override;
|
||||||
|
|
||||||
|
absl::Status WaitForManifestAck(const std::string& instance,
|
||||||
|
absl::Duration timeout) override;
|
||||||
|
void Shutdown() override;
|
||||||
|
|
||||||
|
ContentIdProto GetManifestId() const ABSL_LOCKS_EXCLUDED(mutex_) override;
|
||||||
|
|
||||||
|
private:
|
||||||
|
mutable absl::Mutex mutex_;
|
||||||
|
ContentIdProto manifest_id_ ABSL_GUARDED_BY(mutex_);
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // ASSET_STREAM_MANAGER_TESTING_ASSET_STREAM_SERVER_H_
|
||||||
3
cdc_fuse_fs/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
GGP/*
*.log
*.user
136
cdc_fuse_fs/BUILD
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
package(default_visibility = ["//:__subpackages__"])
|
||||||
|
|
||||||
|
cc_binary(
|
||||||
|
name = "cdc_fuse_fs",
|
||||||
|
srcs = ["main.cc"],
|
||||||
|
deps = [
|
||||||
|
":cdc_fuse_fs_lib",
|
||||||
|
":constants",
|
||||||
|
"//absl_helper:jedec_size_flag",
|
||||||
|
"//common:gamelet_component",
|
||||||
|
"//common:log",
|
||||||
|
"//data_store:data_provider",
|
||||||
|
"//data_store:disk_data_store",
|
||||||
|
"//data_store:grpc_reader",
|
||||||
|
"@com_google_absl//absl/flags:parse",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Dependencies for cdc_fuse_fs_lib, except for FUSE.
|
||||||
|
cdc_fuse_fs_lib_shared_deps = [
|
||||||
|
":asset",
|
||||||
|
":asset_stream_client",
|
||||||
|
":config_stream_client",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:platform",
|
||||||
|
"//common:util",
|
||||||
|
"//common:threadpool",
|
||||||
|
"@com_github_jsoncpp//:jsoncpp",
|
||||||
|
]
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "cdc_fuse_fs_lib",
|
||||||
|
srcs = ["cdc_fuse_fs.cc"],
|
||||||
|
hdrs = ["cdc_fuse_fs.h"],
|
||||||
|
target_compatible_with = ["@platforms//os:linux"],
|
||||||
|
deps = cdc_fuse_fs_lib_shared_deps + ["@com_github_fuse//:fuse_shared"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "cdc_fuse_fs_lib_mocked",
|
||||||
|
srcs = ["cdc_fuse_fs.cc"],
|
||||||
|
hdrs = ["cdc_fuse_fs.h"],
|
||||||
|
copts = ["-DUSE_MOCK_LIBFUSE=1"],
|
||||||
|
deps = cdc_fuse_fs_lib_shared_deps + [":mock_libfuse"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "cdc_fuse_fs_test",
|
||||||
|
srcs = ["cdc_fuse_fs_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":cdc_fuse_fs_lib_mocked",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//data_store",
|
||||||
|
"//data_store:mem_data_store",
|
||||||
|
"//manifest:fake_manifest_builder",
|
||||||
|
"//manifest:manifest_builder",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
"@com_google_googletest//:gtest_main",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "mock_libfuse",
|
||||||
|
srcs = ["mock_libfuse.cc"],
|
||||||
|
hdrs = ["mock_libfuse.h"],
|
||||||
|
deps = ["//common:platform"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "constants",
|
||||||
|
hdrs = ["constants.h"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "asset_stream_client",
|
||||||
|
srcs = ["asset_stream_client.cc"],
|
||||||
|
hdrs = ["asset_stream_client.h"],
|
||||||
|
deps = [
|
||||||
|
"//common:log",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:stopwatch",
|
||||||
|
"//manifest:manifest_proto_defs",
|
||||||
|
"//proto:asset_stream_service_grpc_proto",
|
||||||
|
"@com_google_absl//absl/status:statusor",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "asset",
|
||||||
|
srcs = ["asset.cc"],
|
||||||
|
hdrs = ["asset.h"],
|
||||||
|
deps = [
|
||||||
|
"//common:buffer",
|
||||||
|
"//common:status",
|
||||||
|
"//data_store",
|
||||||
|
"//manifest:content_id",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
"@com_google_absl//absl/status:statusor",
|
||||||
|
"@com_google_absl//absl/strings",
|
||||||
|
"@com_google_absl//absl/synchronization",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "asset_test",
|
||||||
|
srcs = ["asset_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":asset",
|
||||||
|
"//common:path",
|
||||||
|
"//common:platform",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//data_store:mem_data_store",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
"@com_google_googletest//:gtest_main",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "config_stream_client",
|
||||||
|
srcs = ["config_stream_client.cc"],
|
||||||
|
hdrs = ["config_stream_client.h"],
|
||||||
|
deps = [
|
||||||
|
"//common:grpc_status",
|
||||||
|
"//common:log",
|
||||||
|
"//manifest:content_id",
|
||||||
|
"//proto:asset_stream_service_grpc_proto",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_sources",
|
||||||
|
srcs = glob(["*_test.cc"]),
|
||||||
|
)
|
||||||
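The BUILD file above compiles cdc_fuse_fs.cc twice: once against real libfuse and once with -DUSE_MOCK_LIBFUSE=1 against the mock_libfuse target, so the FUSE layer can be unit-tested without mounting a filesystem. The snippet below sketches how such a compile-time switch is commonly wired up; the exact include logic inside cdc_fuse_fs.cc, and the libfuse header chosen, are assumptions.

// Sketch only: selecting between the test mock and real FUSE headers based on
// the USE_MOCK_LIBFUSE define set by the cc_library targets above.
#ifdef USE_MOCK_LIBFUSE
#include "cdc_fuse_fs/mock_libfuse.h"
#else
#include <fuse_lowlevel.h>
#endif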
520
cdc_fuse_fs/asset.cc
Normal file
@@ -0,0 +1,520 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_fuse_fs/asset.h"
|
||||||
|
|
||||||
|
#include "common/buffer.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "data_store/data_store_reader.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
Asset::Asset() = default;
|
||||||
|
|
||||||
|
Asset::~Asset() = default;
|
||||||
|
|
||||||
|
void Asset::Initialize(ino_t parent_ino, DataStoreReader* data_store_reader,
|
||||||
|
const AssetProto* proto) {
|
||||||
|
parent_ino_ = parent_ino;
|
||||||
|
|
||||||
|
assert(!data_store_reader_ && data_store_reader);
|
||||||
|
data_store_reader_ = data_store_reader;
|
||||||
|
|
||||||
|
assert(!proto_ && proto);
|
||||||
|
proto_ = proto;
|
||||||
|
|
||||||
|
// Create a lookup for the direct assets, if any.
|
||||||
|
// Lock the mutex for convenience; it's not strictly necessary here, as no
|
||||||
|
// other thread has access to this object.
|
||||||
|
absl::WriterMutexLock lock(&mutex_);
|
||||||
|
UpdateProtoLookup(proto_->dir_assets());
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> Asset::GetAllChildProtos() {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
assert(proto_);
|
||||||
|
|
||||||
|
if (proto_->type() != AssetProto::DIRECTORY) {
|
||||||
|
return absl::InvalidArgumentError(
|
||||||
|
absl::StrFormat("Asset '%s' is not a directory asset", proto_->name()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch all indirect dir asset lists.
|
||||||
|
for (;;) {
|
||||||
|
bool list_was_fetched;
|
||||||
|
ASSIGN_OR_RETURN(list_was_fetched, FetchNextDirAssetList(),
|
||||||
|
"Failed to fetch directory assets");
|
||||||
|
if (!list_was_fetched) break;
|
||||||
|
}
|
||||||
|
return GetLoadedChildProtos();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<const AssetProto*> Asset::GetLoadedChildProtos() const {
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
// Push all directory asset protos to a vector.
|
||||||
|
std::vector<const AssetProto*> protos;
|
||||||
|
protos.reserve(proto_lookup_.size());
|
||||||
|
for (const std::pair<const absl::string_view, const AssetProto*>& kv :
|
||||||
|
proto_lookup_) {
|
||||||
|
protos.push_back(kv.second);
|
||||||
|
}
|
||||||
|
return protos;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<const AssetProto*> Asset::Lookup(const char* name) {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
assert(proto_);
|
||||||
|
if (proto_->type() != AssetProto::DIRECTORY) {
|
||||||
|
return absl::InvalidArgumentError(
|
||||||
|
absl::StrFormat("Asset '%s' is not a directory asset", proto_->name()));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (;;) {
|
||||||
|
{
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
// Check if we already have the asset.
|
||||||
|
std::unordered_map<absl::string_view, const AssetProto*>::iterator it =
|
||||||
|
proto_lookup_.find(name);
|
||||||
|
if (it != proto_lookup_.end()) {
|
||||||
|
return it->second;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch one more indirect asset list.
|
||||||
|
bool list_was_fetched;
|
||||||
|
ASSIGN_OR_RETURN(list_was_fetched, FetchNextDirAssetList(),
|
||||||
|
"Failed to fetch directory assets");
|
||||||
|
if (!list_was_fetched) {
|
||||||
|
// All lists were fetched, but asset still wasn't found.
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<uint64_t> Asset::Read(uint64_t offset, void* data,
|
||||||
|
uint64_t size) {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
assert(proto_);
|
||||||
|
if (proto_->type() != AssetProto::FILE)
|
||||||
|
return absl::InvalidArgumentError("Not a file asset");
|
||||||
|
|
||||||
|
if (size == 0) return 0;
|
||||||
|
|
||||||
|
// Find a chunk list such that list offset <= offset < next list offset.
|
||||||
|
int list_idx = FindChunkList(offset);
|
||||||
|
const RepeatedChunkRefProto* chunk_refs;
|
||||||
|
ASSIGN_OR_RETURN(chunk_refs, GetChunkRefList(list_idx),
|
||||||
|
"Failed to fetch indirect chunk list %i", list_idx);
|
||||||
|
uint64_t chunk_list_offset = ChunkListOffset(list_idx);
|
||||||
|
if (!chunk_refs) return 0; // Out of bounds.
|
||||||
|
|
||||||
|
// Find a chunk such that chunk offset <= offset < next chunk offset.
|
||||||
|
int chunk_idx = FindChunk(*chunk_refs, chunk_list_offset, offset);
|
||||||
|
if (chunk_idx < 0 || chunk_idx >= chunk_refs->size()) {
|
||||||
|
// Data is malformed, e.g. empty chunk list with non-zero file size.
|
||||||
|
return MakeStatus(
|
||||||
|
"Invalid chunk ref list %i. Found chunk index %i not in [0, %u).",
|
||||||
|
list_idx, chunk_idx, chunk_refs->size());
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t data_bytes_left = size;
|
||||||
|
uint64_t prefetch_bytes_left = data_store_reader_->PrefetchSize(size);
|
||||||
|
// Collect the chunk IDs required to satisfy the read request.
|
||||||
|
ChunkTransferList chunks;
|
||||||
|
while (chunk_refs) {
|
||||||
|
const ChunkRefProto& chunk_ref = chunk_refs->at(chunk_idx);
|
||||||
|
|
||||||
|
// Figure out how much data we have to read from the current chunk.
|
||||||
|
uint64_t chunk_absolute_offset = chunk_list_offset + chunk_ref.offset();
|
||||||
|
uint64_t chunk_offset =
|
||||||
|
offset > chunk_absolute_offset ? offset - chunk_absolute_offset : 0;
|
||||||
|
uint64_t chunk_size = ChunkSize(list_idx, chunk_idx, chunk_refs);
|
||||||
|
assert(chunk_size >= chunk_offset);
|
||||||
|
uint64_t bytes_to_read =
|
||||||
|
std::min<uint64_t>(chunk_size - chunk_offset, data_bytes_left);
|
||||||
|
uint64_t bytes_to_prefetch =
|
||||||
|
std::min<uint64_t>(chunk_size - chunk_offset, prefetch_bytes_left);
|
||||||
|
|
||||||
|
// Enqueue a chunk transfer task.
|
||||||
|
chunks.emplace_back(chunk_ref.chunk_id(), chunk_offset,
|
||||||
|
bytes_to_read ? data : nullptr, bytes_to_read);
|
||||||
|
data = static_cast<char*>(data) + bytes_to_read;
|
||||||
|
data_bytes_left =
|
||||||
|
data_bytes_left > bytes_to_read ? data_bytes_left - bytes_to_read : 0;
|
||||||
|
prefetch_bytes_left -= bytes_to_prefetch;
|
||||||
|
offset += bytes_to_prefetch;
|
||||||
|
|
||||||
|
// If we request enough data, we are done.
|
||||||
|
if (!prefetch_bytes_left) break;
|
||||||
|
|
||||||
|
// Otherwise find next chunk.
|
||||||
|
++chunk_idx;
|
||||||
|
while (chunk_idx >= chunk_refs->size()) {
|
||||||
|
// Go to next list.
|
||||||
|
chunk_idx = 0;
|
||||||
|
++list_idx;
|
||||||
|
ASSIGN_OR_RETURN(chunk_refs, GetChunkRefList(list_idx),
|
||||||
|
"Failed to fetch indirect chunk list %i", list_idx);
|
||||||
|
chunk_list_offset = ChunkListOffset(list_idx);
|
||||||
|
if (!chunk_refs) {
|
||||||
|
// Out of bounds. If we're not at the file size now, it's an error.
|
||||||
|
if (offset != proto_->file_size()) {
|
||||||
|
return MakeStatus(
|
||||||
|
"Read error at position %u. Expected to be at file size %u.",
|
||||||
|
offset, proto_->file_size());
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (chunk_refs) {
|
||||||
|
// We should be exactly at a chunk boundary now.
|
||||||
|
uint64_t chunk_rel_offset = chunk_refs->at(chunk_idx).offset();
|
||||||
|
if (offset != chunk_list_offset + chunk_rel_offset) {
|
||||||
|
return MakeStatus("Unexpected chunk offset %u, expected %u + %u = %u",
|
||||||
|
offset, chunk_list_offset, chunk_rel_offset,
|
||||||
|
chunk_list_offset + chunk_rel_offset);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read all data.
|
||||||
|
absl::Status status = data_store_reader_->Get(&chunks);
|
||||||
|
if (!status.ok() || !chunks.ReadDone()) {
|
||||||
|
std::string msg = absl::StrFormat(
|
||||||
|
"Failed to fetch chunk(s) [%s] for file '%s', offset %u, size %u",
|
||||||
|
chunks.ToHexString(
|
||||||
|
[](auto const& chunk) { return chunk.size && !chunk.done; }),
|
||||||
|
proto_->name(), offset, size);
|
||||||
|
return status.ok() ? absl::DataLossError(msg)
|
||||||
|
: WrapStatus(status, "%s", msg);
|
||||||
|
}
|
||||||
|
return size - data_bytes_left;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t Asset::GetNumFetchedFileChunkListsForTesting() {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
// In contrast to |dir_asset_lists_|, |file_chunk_lists_| might be fetched
|
||||||
|
// out-of-order, e.g. if someone tried to read the end of the file.
|
||||||
|
// Unfetched lists are nullptrs.
|
||||||
|
int num_fetched = 0;
|
||||||
|
for (const std::unique_ptr<ChunkListProto>& list : file_chunk_lists_) {
|
||||||
|
if (list) {
|
||||||
|
++num_fetched;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return num_fetched;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t Asset::GetNumFetchedDirAssetsListsForTesting() {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
return dir_asset_lists_.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
void Asset::UpdateProto(const AssetProto* proto) {
|
||||||
|
absl::WriterMutexLock write_lock(&mutex_);
|
||||||
|
proto_lookup_.clear();
|
||||||
|
file_chunk_lists_.clear();
|
||||||
|
dir_asset_lists_.clear();
|
||||||
|
proto_ = proto;
|
||||||
|
if (proto_) {
|
||||||
|
UpdateProtoLookup(proto_->dir_assets());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Asset::IsConsistent(std::string* warning) const {
|
||||||
|
assert(proto_ && warning);
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
switch (proto_->type()) {
|
||||||
|
case AssetProto::FILE:
|
||||||
|
if (!proto_lookup_.empty() || !proto_->dir_assets().empty() ||
|
||||||
|
!proto_->dir_indirect_assets().empty()) {
|
||||||
|
*warning = "File asset contains sub-assets";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!proto_->symlink_target().empty()) {
|
||||||
|
*warning = "File asset contains a symlink";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case AssetProto::DIRECTORY:
|
||||||
|
if (!proto_->file_chunks().empty() || !file_chunk_lists_.empty() ||
|
||||||
|
!proto_->file_indirect_chunks().empty()) {
|
||||||
|
*warning = "Directory asset contains file chunks";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!proto_->symlink_target().empty()) {
|
||||||
|
*warning = "Directory asset contains a symlink";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (proto_->file_size() > 0) {
|
||||||
|
*warning = "File size is defined for a directory asset";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case AssetProto::SYMLINK:
|
||||||
|
if (!proto_lookup_.empty() || !proto_->dir_assets().empty() ||
|
||||||
|
!proto_->dir_indirect_assets().empty()) {
|
||||||
|
*warning = "Symlink asset contains sub-assets";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!proto_->file_chunks().empty() || !file_chunk_lists_.empty() ||
|
||||||
|
!proto_->file_indirect_chunks().empty()) {
|
||||||
|
*warning = "Symlink asset contains file chunks";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (proto_->file_size() > 0) {
|
||||||
|
*warning = "File size is defined for a symlink asset";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
*warning = "Undefined asset type";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directory assets should not have any file chunks.
|
||||||
|
// Absolute file chunk offsets for all loaded direct and indirect chunks
|
||||||
|
// should be monotonically increasing.
|
||||||
|
if (proto_->type() == AssetProto::FILE) {
|
||||||
|
// Check direct chunks.
|
||||||
|
size_t total_offset = 0;
|
||||||
|
for (int idx = 0; idx < proto_->file_chunks_size(); ++idx) {
|
||||||
|
if (proto_->file_chunks(idx).offset() < total_offset) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"Disordered direct chunks: idx=%u, total_offset=%u, "
|
||||||
|
"chunk_offset=%u",
|
||||||
|
idx, total_offset, proto_->file_chunks(idx).offset());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
total_offset = proto_->file_chunks(idx).offset();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check indirect lists.
|
||||||
|
size_t prev_list_offset = total_offset;
|
||||||
|
for (int list_idx = 0; list_idx < proto_->file_indirect_chunks_size();
|
||||||
|
++list_idx) {
|
||||||
|
size_t list_offset = ChunkListOffset(list_idx);
|
||||||
|
if (list_idx == 0 && proto_->file_chunks_size() == 0 &&
|
||||||
|
list_offset != 0) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"Disordered indirect chunk list: the list offset should be 0, as "
|
||||||
|
"there are no direct file chunks: "
|
||||||
|
"list_offset=%u, previous list_offset=%u",
|
||||||
|
list_offset, prev_list_offset);
|
||||||
|
return false;
|
||||||
|
} else if (list_idx > 0 && (prev_list_offset >= list_offset ||
|
||||||
|
total_offset >= list_offset)) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"Disordered indirect chunk list: the list offset should increase: "
|
||||||
|
"list_offset=%u, previous list_offset=%u, total_offset=%u",
|
||||||
|
list_offset, prev_list_offset, total_offset);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (file_chunk_lists_.size() <= list_idx ||
|
||||||
|
!file_chunk_lists_[list_idx]) {
|
||||||
|
total_offset = list_offset;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
// If the list is fetched, check its chunks' order.
|
||||||
|
for (int chunk_idx = 0;
|
||||||
|
chunk_idx < file_chunk_lists_[list_idx]->chunks_size();
|
||||||
|
++chunk_idx) {
|
||||||
|
const ChunkRefProto& chunk =
|
||||||
|
file_chunk_lists_[list_idx]->chunks(chunk_idx);
|
||||||
|
if (chunk_idx == 0 && chunk.offset() != 0) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"The offset of the first chunk in the list should be 0: "
|
||||||
|
"list_idx=%u, list_offset=%u, chunk_offset=%u",
|
||||||
|
list_idx, list_offset, chunk.offset());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (chunk.offset() + list_offset < total_offset) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"Disordered indirect chunk list: list_idx=%u, list_offset=%u, "
|
||||||
|
"offset=%u, chunk_offset=%u",
|
||||||
|
list_idx, list_offset, total_offset, chunk.offset());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
total_offset = list_offset + chunk.offset();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (total_offset == 0 && proto_->file_size() == 0) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// The last absolute offset should be less than the file size.
|
||||||
|
if (total_offset >= proto_->file_size()) {
|
||||||
|
*warning = absl::StrFormat(
|
||||||
|
"The last absolute file offset exceeds the file size: %u >= %u",
|
||||||
|
total_offset, proto_->file_size());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<bool> Asset::FetchNextDirAssetList() {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
assert(proto_);
|
||||||
|
|
||||||
|
{
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
// Shortcut to prevent acquiring a write lock if everything has been loaded.
|
||||||
|
if (dir_asset_lists_.size() >=
|
||||||
|
static_cast<size_t>(proto_->dir_indirect_assets_size())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::WriterMutexLock write_lock(&mutex_);
|
||||||
|
|
||||||
|
// Check again in case some other thread has run this in the meantime.
|
||||||
|
if (dir_asset_lists_.size() >=
|
||||||
|
static_cast<size_t>(proto_->dir_indirect_assets_size())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read next indirect asset list.
|
||||||
|
const ContentIdProto& id =
|
||||||
|
proto_->dir_indirect_assets(static_cast<int>(dir_asset_lists_.size()));
|
||||||
|
auto list = std::make_unique<AssetListProto>();
|
||||||
|
RETURN_IF_ERROR(data_store_reader_->GetProto(id, list.get()),
|
||||||
|
"Failed to fetch AssetList proto with id %s",
|
||||||
|
ContentId::ToHexString(id));
|
||||||
|
dir_asset_lists_.push_back(std::move(list));
|
||||||
|
UpdateProtoLookup(dir_asset_lists_.back()->assets());
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Asset::UpdateProtoLookup(const RepeatedAssetProto& list) {
|
||||||
|
assert((mutex_.AssertHeld(), true));
|
||||||
|
|
||||||
|
for (const AssetProto& asset : list) {
|
||||||
|
proto_lookup_[asset.name().c_str()] = &asset;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int Asset::FindChunkList(uint64_t offset) {
|
||||||
|
assert(proto_);
|
||||||
|
const RepeatedIndirectChunkListProto& lists = proto_->file_indirect_chunks();
|
||||||
|
if (offset >= proto_->file_size()) {
|
||||||
|
// |offset| is not inside the file.
|
||||||
|
return proto_->file_indirect_chunks_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Optimize search by using average chunk size.
|
||||||
|
auto it =
|
||||||
|
std::upper_bound(lists.begin(), lists.end(), offset,
|
||||||
|
[](uint64_t value, const IndirectChunkListProto& list) {
|
||||||
|
return value < list.offset();
|
||||||
|
});
|
||||||
|
return it - lists.begin() - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int Asset::FindChunk(const RepeatedChunkRefProto& chunks,
|
||||||
|
uint64_t chunk_list_offset, uint64_t chunk_offset) {
|
||||||
|
assert(chunk_list_offset <= chunk_offset);
|
||||||
|
uint64_t rel_offset = chunk_offset - chunk_list_offset;
|
||||||
|
// TODO: Optimize search by using average chunk size.
|
||||||
|
auto it = std::upper_bound(chunks.begin(), chunks.end(), rel_offset,
|
||||||
|
[](uint64_t value, const ChunkRefProto& ch) {
|
||||||
|
return value < ch.offset();
|
||||||
|
});
|
||||||
|
return it - chunks.begin() - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t Asset::ChunkListOffset(int list_idx) const {
|
||||||
|
assert(list_idx >= -1 && proto_ &&
|
||||||
|
list_idx <= proto_->file_indirect_chunks_size());
|
||||||
|
|
||||||
|
if (list_idx == -1) return 0;
|
||||||
|
if (list_idx < proto_->file_indirect_chunks_size())
|
||||||
|
return proto_->file_indirect_chunks(list_idx).offset();
|
||||||
|
return proto_->file_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t Asset::ChunkSize(int list_idx, int chunk_idx,
|
||||||
|
const RepeatedChunkRefProto* chunk_refs) {
|
||||||
|
assert(chunk_idx >= 0 && chunk_idx < chunk_refs->size());
|
||||||
|
assert(list_idx >= -1 && proto_ &&
|
||||||
|
list_idx <= proto_->file_indirect_chunks_size());
|
||||||
|
|
||||||
|
// If the next chunk is in the same chunk_refs list, just return offset diff.
|
||||||
|
if (chunk_idx + 1 < chunk_refs->size()) {
|
||||||
|
return chunk_refs->at(chunk_idx + 1).offset() -
|
||||||
|
chunk_refs->at(chunk_idx).offset();
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the next chunk is on another list, use the next list's offset.
|
||||||
|
// Note that this also works for the last list, where
|
||||||
|
// GetChunkListOffset(list_idx + 1) returns the file size.
|
||||||
|
uint64_t chunk_absolute_offset =
|
||||||
|
chunk_refs->at(chunk_idx).offset() + ChunkListOffset(list_idx);
|
||||||
|
return ChunkListOffset(list_idx + 1) - chunk_absolute_offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<const RepeatedChunkRefProto*> Asset::GetChunkRefList(
|
||||||
|
int list_idx) {
|
||||||
|
mutex_.AssertNotHeld();
|
||||||
|
assert(list_idx >= -1 && proto_ &&
|
||||||
|
list_idx <= proto_->file_indirect_chunks_size());
|
||||||
|
|
||||||
|
if (list_idx == -1) {
|
||||||
|
// Direct chunk list.
|
||||||
|
return &proto_->file_chunks();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (list_idx == proto_->file_indirect_chunks_size()) {
|
||||||
|
// Indicates EOF.
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
absl::ReaderMutexLock read_lock(&mutex_);
|
||||||
|
|
||||||
|
// Do a quick check first if the list is already loaded.
|
||||||
|
// This only requires a read lock.
|
||||||
|
if (static_cast<size_t>(list_idx) < file_chunk_lists_.size() &&
|
||||||
|
file_chunk_lists_[list_idx]) {
|
||||||
|
return &file_chunk_lists_[list_idx]->chunks();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::WriterMutexLock write_lock(&mutex_);
|
||||||
|
|
||||||
|
// Indirect chunk list. Check if it has to be fetched.
|
||||||
|
if (file_chunk_lists_.size() < static_cast<size_t>(list_idx) + 1) {
|
||||||
|
file_chunk_lists_.resize(list_idx + 1);
|
||||||
|
}
|
||||||
|
if (!file_chunk_lists_[list_idx]) {
|
||||||
|
auto list = std::make_unique<ChunkListProto>();
|
||||||
|
const ContentIdProto& list_id =
|
||||||
|
proto_->file_indirect_chunks(list_idx).chunk_list_id();
|
||||||
|
RETURN_IF_ERROR(data_store_reader_->GetProto(list_id, list.get()),
|
||||||
|
"Failed to fetch ChunkListProto with id %s",
|
||||||
|
ContentId::ToHexString(list_id));
|
||||||
|
file_chunk_lists_[list_idx] = std::move(list);
|
||||||
|
}
|
||||||
|
return &file_chunk_lists_[list_idx]->chunks();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
182
cdc_fuse_fs/asset.h
Normal file
182
cdc_fuse_fs/asset.h
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_FUSE_FS_ASSET_H_
|
||||||
|
#define CDC_FUSE_FS_ASSET_H_
|
||||||
|
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
#include "absl/base/thread_annotations.h"
|
||||||
|
#include "absl/status/statusor.h"
|
||||||
|
#include "absl/strings/string_view.h"
|
||||||
|
#include "absl/synchronization/mutex.h"
|
||||||
|
#include "manifest/content_id.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class Buffer;
|
||||||
|
class DataStoreReader;
|
||||||
|
|
||||||
|
// Wraps an asset proto for reading and adds additional functionality like name
|
||||||
|
// lookup maps and lazy loading of directory assets and file chunks.
|
||||||
|
// This class is accessed from multiple threads and has to be THREAD-SAFE.
|
||||||
|
class Asset {
|
||||||
|
public:
|
||||||
|
// Inode key type (cmp. fuse_ino_t).
|
||||||
|
using ino_t = uint64_t;
|
||||||
|
|
||||||
|
// Creates a new asset. Must call Initialize() before using it.
|
||||||
|
Asset();
|
||||||
|
~Asset();
|
||||||
|
|
||||||
|
// Make it non-copyable, non-assignable to prevent accidental misuse.
|
||||||
|
Asset(const Asset& other) = delete;
|
||||||
|
Asset& operator=(const Asset& other) = delete;
|
||||||
|
|
||||||
|
// Initialize the class. Must be called right after creation.
|
||||||
|
// NOT thread-safe! (OK as usually no other threads have access at this time.)
|
||||||
|
void Initialize(ino_t parent_ino, DataStoreReader* data_store_reader,
|
||||||
|
const AssetProto* proto);
|
||||||
|
|
||||||
|
// Returns the parent inode id passed to Initialize().
|
||||||
|
// Thread-safe.
|
||||||
|
ino_t parent_ino() const { return parent_ino_; }
|
||||||
|
|
||||||
|
// Returns the asset proto passed to Initialize().
|
||||||
|
// Thread-safe.
|
||||||
|
const AssetProto* proto() const { return proto_; }
|
||||||
|
|
||||||
|
// Returns all child asset protos. Loads them if necessary.
|
||||||
|
// Returns an error if loading an indirect asset list fails.
|
||||||
|
// Returns an InvalidArugmentError if *this is not a directory asset.
|
||||||
|
// |proto_| must be set.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> GetAllChildProtos()
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Returns loaded children's protos. Thread-safe.
|
||||||
|
std::vector<const AssetProto*> GetLoadedChildProtos() const;
|
||||||
|
|
||||||
|
// For directory assets, looks up a child asset by name. Loads indirect asset
|
||||||
|
// lists if needed. Returns an error if loading asset lists fails.
|
||||||
|
// Returns nullptr if the asset cannot be found.
|
||||||
|
// Returns an InvalidArugmentError if *this is not a directory asset.
|
||||||
|
// |proto_| must be set.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::StatusOr<const AssetProto*> Lookup(const char* name)
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// For file assets, reads |size| bytes from the file, starting from |offset|,
|
||||||
|
// and puts the result into |data|. Returns the number of bytes read or 0 if
|
||||||
|
// |offset| >= file size. Loads indirect chunk lists if needed.
|
||||||
|
// Returns an error if loading chunk lists fails.
|
||||||
|
// Returns an InvalidArugmentError if *this is not a file asset.
|
||||||
|
// |proto_| must be set.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::StatusOr<uint64_t> Read(uint64_t offset, void* data, uint64_t size);
|
||||||
|
|
||||||
|
size_t GetNumFetchedFileChunkListsForTesting() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
size_t GetNumFetchedDirAssetsListsForTesting() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Updates asset proto, all corresponding internal structures are cleaned up.
|
||||||
|
// This is an expensive operation as the previously created internal
|
||||||
|
// structures are removed. Thread-safe.
|
||||||
|
void UpdateProto(const AssetProto* proto) ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Checks consistency of the asset, for example: directory assets should not
|
||||||
|
// contain any file chunks. Any discovered inconsistencies are defined in
|
||||||
|
// |warning|.
|
||||||
|
bool IsConsistent(std::string* warning) const ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Loads the next indirect directory asset list.
|
||||||
|
// Returns true if a list was fetched.
|
||||||
|
// Returns false if all lists have already been fetched.
|
||||||
|
// Returns an error if fetching an indirect asset list failed.
|
||||||
|
// |proto_| must be set.
|
||||||
|
absl::StatusOr<bool> FetchNextDirAssetList() ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Puts all assets from |list| into |proto_lookup_|.
|
||||||
|
void UpdateProtoLookup(const RepeatedAssetProto& list)
|
||||||
|
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
|
||||||
|
|
||||||
|
// Returns the index of the indirect chunk list that |offset| falls into or -1
|
||||||
|
// if |offset| is contained in the direct chunk list. Returns the number of
|
||||||
|
// indirect lists in |proto_| if |offset| is larger or equal to the file size.
|
||||||
|
// |proto_| must be set.
|
||||||
|
int FindChunkList(uint64_t offset);
|
||||||
|
|
||||||
|
// Returns the index of the chunk that |chunk_offset| falls into. The offsets
|
||||||
|
// in list |chunks| are interpreted relative to |chunk_list_offset|.
|
||||||
|
int FindChunk(const RepeatedChunkRefProto& chunks, uint64_t chunk_list_offset,
|
||||||
|
uint64_t chunk_offset);
|
||||||
|
|
||||||
|
// Gets the direct or an indirect chunk list. Fetches indirect chunk lists if
|
||||||
|
// necessary. |list_idx| must be in [-1, number of indirect chunk lists].
|
||||||
|
//
|
||||||
|
// Returns the direct chunk list if |list_idx| is -1. Returns nullptr if
|
||||||
|
// |list_idx| equals the number of indirect chunk lists. Returns the indirect
|
||||||
|
// chunk list at index |list_idx| otherwise. Returns an error if fetching an
|
||||||
|
// indirect chunk list fails.
|
||||||
|
// |proto_| must be set.
|
||||||
|
absl::StatusOr<const RepeatedChunkRefProto*> GetChunkRefList(int list_idx)
|
||||||
|
ABSL_LOCKS_EXCLUDED(mutex_);
|
||||||
|
|
||||||
|
// Returns the absolute offset of the chunk list with index |list_idx|.
|
||||||
|
// |list_idx| must be in [-1, number of indirect chunk lists]. -1 refers to
|
||||||
|
// the direct chunk list, in which case 0 is returned. If |list_idx| equals
|
||||||
|
// the number of indirect chunk lists, the file size is returned. Otherwise,
|
||||||
|
// the corresponding indirect chunk list's offset is returned.
|
||||||
|
// |proto_| must be set.
|
||||||
|
uint64_t ChunkListOffset(int list_idx) const;
|
||||||
|
|
||||||
|
// Returns the chunk size of the chunk with index |chunk_idx| on the chunk
|
||||||
|
// list with index |list_idx| and corresponding proto |chunk_refs|.
|
||||||
|
// |list_idx| must be in [-1, number of indirect chunk lists - 1].
|
||||||
|
// |chunk_idx| must be in [0, chunk_refs->size()].
|
||||||
|
// |proto_| must be set.
|
||||||
|
uint64_t ChunkSize(int list_idx, int chunk_idx,
|
||||||
|
const RepeatedChunkRefProto* chunk_refs);
|
||||||
|
|
||||||
|
// Parent inode, for ".." in dir listings.
|
||||||
|
ino_t parent_ino_ = 0;
|
||||||
|
|
||||||
|
// Interface for loading content (chunks, assets).
|
||||||
|
DataStoreReader* data_store_reader_ = nullptr;
|
||||||
|
|
||||||
|
// Corresponding asset proto.
|
||||||
|
const AssetProto* proto_ = nullptr;
|
||||||
|
|
||||||
|
// RW mutex for increased thread-safetiness.
|
||||||
|
mutable absl::Mutex mutex_;
|
||||||
|
|
||||||
|
// Maps asset proto names to asset protos for all protos loaded so far.
|
||||||
|
// The string views point directly into asset protos.
|
||||||
|
std::unordered_map<absl::string_view, const AssetProto*> proto_lookup_
|
||||||
|
ABSL_GUARDED_BY(mutex_);
|
||||||
|
|
||||||
|
// Fetched |file_indirect_chunks| chunk lists.
|
||||||
|
std::vector<std::unique_ptr<ChunkListProto>> file_chunk_lists_
|
||||||
|
ABSL_GUARDED_BY(mutex_);
|
||||||
|
|
||||||
|
// Fetched |dir_indirect_assets| fields so far.
|
||||||
|
std::vector<std::unique_ptr<AssetListProto>> dir_asset_lists_
|
||||||
|
ABSL_GUARDED_BY(mutex_);
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_FUSE_FS_ASSET_H_
|
||||||
112
cdc_fuse_fs/asset_stream_client.cc
Normal file
112
cdc_fuse_fs/asset_stream_client.cc
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_fuse_fs/asset_stream_client.h"
|
||||||
|
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/stopwatch.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
using GetContentRequest = proto::GetContentRequest;
|
||||||
|
using GetContentResponse = proto::GetContentResponse;
|
||||||
|
using SendCachedContentIdsRequest = proto::SendCachedContentIdsRequest;
|
||||||
|
using SendCachedContentIdsResponse = proto::SendCachedContentIdsResponse;
|
||||||
|
|
||||||
|
AssetStreamClient::AssetStreamClient(std::shared_ptr<grpc::Channel> channel,
|
||||||
|
bool enable_stats)
|
||||||
|
: enable_stats_(enable_stats) {
|
||||||
|
stub_ = AssetStreamService::NewStub(std::move(channel));
|
||||||
|
}
|
||||||
|
|
||||||
|
AssetStreamClient::~AssetStreamClient() = default;
|
||||||
|
|
||||||
|
size_t TotalDataSize(const RepeatedStringProto& data) {
|
||||||
|
size_t total_size = 0;
|
||||||
|
for (const std::string& s : data) {
|
||||||
|
total_size += s.size();
|
||||||
|
}
|
||||||
|
return total_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<std::string> AssetStreamClient::GetContent(
|
||||||
|
const ContentIdProto& id) {
|
||||||
|
GetContentRequest request;
|
||||||
|
*request.add_id() = id;
|
||||||
|
if (enable_stats_)
|
||||||
|
request.set_thread_id(thread_id_hash_(std::this_thread::get_id()));
|
||||||
|
|
||||||
|
grpc::ClientContext context;
|
||||||
|
GetContentResponse response;
|
||||||
|
|
||||||
|
Stopwatch sw;
|
||||||
|
grpc::Status status = stub_->GetContent(&context, request, &response);
|
||||||
|
LOG_DEBUG("GRPC TIME %0.3f sec for %u chunks with %u bytes",
|
||||||
|
sw.ElapsedSeconds(), response.data().size(),
|
||||||
|
TotalDataSize(response.data()));
|
||||||
|
|
||||||
|
if (!status.ok()) {
|
||||||
|
return absl::Status(static_cast<absl::StatusCode>(status.error_code()),
|
||||||
|
status.error_message());
|
||||||
|
}
|
||||||
|
assert(response.data_size() == 1);
|
||||||
|
return std::move(*response.mutable_data(0));
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::StatusOr<RepeatedStringProto> AssetStreamClient::GetContent(
|
||||||
|
RepeatedContentIdProto chunk_ids) {
|
||||||
|
if (chunk_ids.empty()) return RepeatedStringProto();
|
||||||
|
|
||||||
|
GetContentRequest request;
|
||||||
|
*request.mutable_id() = std::move(chunk_ids);
|
||||||
|
if (enable_stats_)
|
||||||
|
request.set_thread_id(thread_id_hash_(std::this_thread::get_id()));
|
||||||
|
|
||||||
|
grpc::ClientContext context;
|
||||||
|
GetContentResponse response;
|
||||||
|
|
||||||
|
Stopwatch sw;
|
||||||
|
grpc::Status status = stub_->GetContent(&context, request, &response);
|
||||||
|
|
||||||
|
if (!status.ok()) {
|
||||||
|
return absl::Status(static_cast<absl::StatusCode>(status.error_code()),
|
||||||
|
status.error_message());
|
||||||
|
}
|
||||||
|
LOG_DEBUG("GRPC TIME %0.3f sec for %zu bytes", sw.ElapsedSeconds(),
|
||||||
|
TotalDataSize(response.data()));
|
||||||
|
|
||||||
|
return std::move(*response.mutable_data());
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status AssetStreamClient::SendCachedContentIds(
|
||||||
|
std::vector<ContentIdProto> content_ids) {
|
||||||
|
SendCachedContentIdsRequest request;
|
||||||
|
for (ContentIdProto& id : content_ids) *request.add_id() = std::move(id);
|
||||||
|
|
||||||
|
grpc::ClientContext context;
|
||||||
|
SendCachedContentIdsResponse response;
|
||||||
|
|
||||||
|
grpc::Status status =
|
||||||
|
stub_->SendCachedContentIds(&context, request, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return absl::Status(static_cast<absl::StatusCode>(status.error_code()),
|
||||||
|
status.error_message());
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
62
cdc_fuse_fs/asset_stream_client.h
Normal file
62
cdc_fuse_fs/asset_stream_client.h
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_FUSE_FS_ASSET_STREAM_CLIENT_H_
|
||||||
|
#define CDC_FUSE_FS_ASSET_STREAM_CLIENT_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <string>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "absl/status/statusor.h"
|
||||||
|
#include "grpcpp/channel.h"
|
||||||
|
#include "manifest/manifest_proto_defs.h"
|
||||||
|
#include "proto/asset_stream_service.grpc.pb.h"
|
||||||
|
|
||||||
|
namespace grpc_impl {
|
||||||
|
class Channel;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// gRpc client for streaming assets to a gamelets. The client runs inside the
|
||||||
|
// CDC Fuse filesystem and requests chunks from the workstation.
|
||||||
|
class AssetStreamClient {
|
||||||
|
public:
|
||||||
|
// |channel| is a grpc channel to use.
|
||||||
|
// |enable_stats| determines whether additional statistics are sent.
|
||||||
|
AssetStreamClient(std::shared_ptr<grpc::Channel> channel, bool enable_stats);
|
||||||
|
~AssetStreamClient();
|
||||||
|
|
||||||
|
// Gets the content of the chunk with given |id|.
|
||||||
|
absl::StatusOr<std::string> GetContent(const ContentIdProto& id);
|
||||||
|
absl::StatusOr<RepeatedStringProto> GetContent(
|
||||||
|
RepeatedContentIdProto chunk_ids);
|
||||||
|
|
||||||
|
// Sends the IDs of all cached chunks to the workstation for statistical
|
||||||
|
// purposes.
|
||||||
|
absl::Status SendCachedContentIds(std::vector<ContentIdProto> content_ids);
|
||||||
|
|
||||||
|
private:
|
||||||
|
using AssetStreamService = proto::AssetStreamService;
|
||||||
|
std::unique_ptr<AssetStreamService::Stub> stub_;
|
||||||
|
bool enable_stats_;
|
||||||
|
std::hash<std::thread::id> thread_id_hash_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_FUSE_FS_ASSET_STREAM_CLIENT_H_
|
||||||
820
cdc_fuse_fs/asset_test.cc
Normal file
820
cdc_fuse_fs/asset_test.cc
Normal file
@@ -0,0 +1,820 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_fuse_fs/asset.h"
|
||||||
|
|
||||||
|
#include "absl/strings/match.h"
|
||||||
|
#include "common/buffer.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status_test_macros.h"
|
||||||
|
#include "data_store/mem_data_store.h"
|
||||||
|
#include "gtest/gtest.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class AssetTest : public ::testing::Test {
|
||||||
|
public:
|
||||||
|
AssetTest()
|
||||||
|
: bad_id_(ContentId::FromDataString(std::string("does not exist"))) {
|
||||||
|
for (size_t n = 0; n < kNumChildProtos; ++n) {
|
||||||
|
child_protos_[n].set_name("file" + std::to_string(n));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
static constexpr Asset::ino_t kParentIno = 1;
|
||||||
|
|
||||||
|
// Adds chunks with the data given by |data_vec| to the store, and
|
||||||
|
// adds references to the chunks to |list|. Updates |offset|.
|
||||||
|
void AddChunks(std::vector<std::vector<char>> data_vec, uint64_t* offset,
|
||||||
|
RepeatedChunkRefProto* list) {
|
||||||
|
for (auto& data : data_vec) {
|
||||||
|
ChunkRefProto* chunk_ref = list->Add();
|
||||||
|
chunk_ref->set_offset(*offset);
|
||||||
|
*offset += data.size();
|
||||||
|
*chunk_ref->mutable_chunk_id() = store_.AddData(std::move(data));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds chunks with the data given by |data_vec| to the store,
|
||||||
|
// creates an indirect chunk list from those chunks and adds a reference to
|
||||||
|
// that list to |list|.
|
||||||
|
void AddIndirectChunks(std::vector<std::vector<char>> data_vec,
|
||||||
|
uint64_t* offset,
|
||||||
|
RepeatedIndirectChunkListProto* list) {
|
||||||
|
uint64_t indirect_list_offset = *offset;
|
||||||
|
*offset = 0;
|
||||||
|
|
||||||
|
ChunkListProto chunk_list;
|
||||||
|
AddChunks(data_vec, offset, chunk_list.mutable_chunks());
|
||||||
|
|
||||||
|
IndirectChunkListProto* indirect_list = list->Add();
|
||||||
|
indirect_list->set_offset(indirect_list_offset);
|
||||||
|
*indirect_list->mutable_chunk_list_id() = store_.AddProto(chunk_list);
|
||||||
|
*offset += indirect_list_offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks if the given list |protos| contains an asset having |name|.
|
||||||
|
static bool ContainsAsset(const std::vector<const AssetProto*>& protos,
|
||||||
|
const std::string& name) {
|
||||||
|
return std::find_if(protos.begin(), protos.end(), [=](const AssetProto* p) {
|
||||||
|
return p->name() == name;
|
||||||
|
}) != protos.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
MemDataStore store_;
|
||||||
|
AssetProto proto_;
|
||||||
|
Asset asset_;
|
||||||
|
|
||||||
|
static constexpr size_t kNumChildProtos = 4;
|
||||||
|
AssetProto child_protos_[kNumChildProtos];
|
||||||
|
|
||||||
|
const ContentIdProto bad_id_;
|
||||||
|
std::string asset_check_;
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_F(AssetTest, BasicGetters) {
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
EXPECT_EQ(asset_.parent_ino(), kParentIno);
|
||||||
|
EXPECT_EQ(asset_.proto(), &proto_);
|
||||||
|
EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
|
||||||
|
EXPECT_STREQ("Undefined asset type", asset_check_.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetAllChildProtosDirectSucceeds) {
|
||||||
|
// Put all children into the direct asset list.
|
||||||
|
for (size_t n = 0; n < kNumChildProtos; ++n)
|
||||||
|
*proto_.add_dir_assets() = child_protos_[n];
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> protos =
|
||||||
|
asset_.GetAllChildProtos();
|
||||||
|
|
||||||
|
ASSERT_OK(protos);
|
||||||
|
ASSERT_EQ(protos->size(), kNumChildProtos);
|
||||||
|
for (size_t n = 0; n < kNumChildProtos; ++n) {
|
||||||
|
EXPECT_TRUE(ContainsAsset(protos.value(), child_protos_[n].name()))
|
||||||
|
<< "Could not find asset " << child_protos_[n].name();
|
||||||
|
}
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetAllChildProtosIndirectSucceeds) {
|
||||||
|
// Put child0 into the direct asset list and children 1-N into indirect lists.
|
||||||
|
*proto_.add_dir_assets() = child_protos_[0];
|
||||||
|
for (size_t n = 1; n < kNumChildProtos; ++n) {
|
||||||
|
AssetListProto list;
|
||||||
|
*list.add_assets() = child_protos_[n];
|
||||||
|
*proto_.add_dir_indirect_assets() = store_.AddProto(list);
|
||||||
|
}
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> protos =
|
||||||
|
asset_.GetAllChildProtos();
|
||||||
|
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedDirAssetsListsForTesting(),
|
||||||
|
kNumChildProtos - 1);
|
||||||
|
ASSERT_OK(protos);
|
||||||
|
ASSERT_EQ(protos->size(), kNumChildProtos);
|
||||||
|
for (size_t n = 0; n < kNumChildProtos; ++n) {
|
||||||
|
EXPECT_TRUE(ContainsAsset(protos.value(), child_protos_[n].name()))
|
||||||
|
<< "Could not find asset " << child_protos_[n].name();
|
||||||
|
}
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetAllChildProtosWithBadListIdFails) {
|
||||||
|
*proto_.add_dir_indirect_assets() = bad_id_;
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> protos =
|
||||||
|
asset_.GetAllChildProtos();
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(protos);
|
||||||
|
EXPECT_TRUE(absl::StrContains(protos.status().message(),
|
||||||
|
"Failed to fetch directory assets"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetAllChildProtosWithWrongTypeFails) {
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<std::vector<const AssetProto*>> protos =
|
||||||
|
asset_.GetAllChildProtos();
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(protos);
|
||||||
|
EXPECT_TRUE(absl::IsInvalidArgument(protos.status()));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetLoadedChildProtosSucceedsForEmpty) {
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
EXPECT_TRUE(asset_.GetLoadedChildProtos().empty());
|
||||||
|
EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
|
||||||
|
EXPECT_STREQ("Undefined asset type", asset_check_.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, GetLoadedChildProtosSucceedsForNonEmpty) {
|
||||||
|
// Put child0 into the direct asset list and children 1-N into indirect lists.
|
||||||
|
*proto_.add_dir_assets() = child_protos_[0];
|
||||||
|
for (size_t n = 1; n < kNumChildProtos; ++n) {
|
||||||
|
AssetListProto list;
|
||||||
|
*list.add_assets() = child_protos_[n];
|
||||||
|
*proto_.add_dir_indirect_assets() = store_.AddProto(list);
|
||||||
|
}
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
// The direct list is always loaded.
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
std::vector<const AssetProto*> protos = asset_.GetLoadedChildProtos();
|
||||||
|
ASSERT_EQ(protos.size(), 1);
|
||||||
|
EXPECT_EQ(protos[0]->name(), child_protos_[0].name());
|
||||||
|
|
||||||
|
// A lookup for the first child triggers loading of the first indirect list.
|
||||||
|
EXPECT_OK(asset_.Lookup(child_protos_[1].name().c_str()));
|
||||||
|
protos = asset_.GetLoadedChildProtos();
|
||||||
|
ASSERT_EQ(protos.size(), 2);
|
||||||
|
EXPECT_TRUE(ContainsAsset(protos, child_protos_[0].name()));
|
||||||
|
EXPECT_TRUE(ContainsAsset(protos, child_protos_[1].name()));
|
||||||
|
|
||||||
|
// GetAllChildProtos() triggers loading of all indirect lists.
|
||||||
|
EXPECT_OK(asset_.GetAllChildProtos());
|
||||||
|
protos = asset_.GetLoadedChildProtos();
|
||||||
|
ASSERT_EQ(protos.size(), 4u);
|
||||||
|
for (size_t n = 0; n < protos.size(); ++n) {
|
||||||
|
EXPECT_TRUE(ContainsAsset(protos, child_protos_[n].name()))
|
||||||
|
<< "Could not find asset " << child_protos_[n].name();
|
||||||
|
}
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, LookupSucceeds) {
|
||||||
|
// Put child0 into the direct asset list and children 1-N into indirect lists.
|
||||||
|
*proto_.add_dir_assets() = child_protos_[0];
|
||||||
|
for (size_t n = 1; n < kNumChildProtos; ++n) {
|
||||||
|
AssetListProto list;
|
||||||
|
*list.add_assets() = child_protos_[n];
|
||||||
|
*proto_.add_dir_indirect_assets() = store_.AddProto(list);
|
||||||
|
}
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
// Indirect asset lists should be fetched in a lazy fashion.
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<const AssetProto*> file0 = asset_.Lookup("file0");
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedDirAssetsListsForTesting(), 0);
|
||||||
|
absl::StatusOr<const AssetProto*> file1 = asset_.Lookup("file1");
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedDirAssetsListsForTesting(), 1);
|
||||||
|
absl::StatusOr<const AssetProto*> file3 = asset_.Lookup("file3");
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedDirAssetsListsForTesting(), 3);
|
||||||
|
|
||||||
|
ASSERT_OK(file0);
|
||||||
|
ASSERT_OK(file1);
|
||||||
|
ASSERT_OK(file3);
|
||||||
|
|
||||||
|
ASSERT_NE(*file0, nullptr);
|
||||||
|
ASSERT_NE(*file1, nullptr);
|
||||||
|
ASSERT_NE(*file3, nullptr);
|
||||||
|
|
||||||
|
EXPECT_EQ((*file0)->name(), child_protos_[0].name());
|
||||||
|
EXPECT_EQ((*file1)->name(), child_protos_[1].name());
|
||||||
|
EXPECT_EQ((*file3)->name(), child_protos_[3].name());
|
||||||
|
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, LookupNotFoundSucceeds) {
|
||||||
|
// Put child0 into the direct asset list and children 1-N into indirect lists.
|
||||||
|
*proto_.add_dir_assets() = child_protos_[0];
|
||||||
|
for (size_t n = 1; n < kNumChildProtos; ++n) {
|
||||||
|
AssetListProto list;
|
||||||
|
*list.add_assets() = child_protos_[n];
|
||||||
|
*proto_.add_dir_indirect_assets() = store_.AddProto(list);
|
||||||
|
}
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<const AssetProto*> proto = asset_.Lookup("non_existing");
|
||||||
|
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedDirAssetsListsForTesting(),
|
||||||
|
kNumChildProtos - 1);
|
||||||
|
ASSERT_OK(proto);
|
||||||
|
ASSERT_EQ(*proto, nullptr);
|
||||||
|
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, LookupWithWrongTypeFails) {
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
absl::StatusOr<const AssetProto*> proto = asset_.Lookup("foo");
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(proto);
|
||||||
|
EXPECT_TRUE(absl::IsInvalidArgument(proto.status()));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, LookupWithBadListIdFails) {
|
||||||
|
*proto_.add_dir_assets() = child_protos_[0];
|
||||||
|
*proto_.add_dir_indirect_assets() = bad_id_;
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
// This should succeed since 'file0' on the direct assets list.
|
||||||
|
ASSERT_OK(asset_.Lookup("file0"));
|
||||||
|
|
||||||
|
// This should fail since it should trigger loading the bad id.
|
||||||
|
absl::StatusOr<const AssetProto*> proto = asset_.Lookup("file1");
|
||||||
|
ASSERT_NOT_OK(proto);
|
||||||
|
EXPECT_TRUE(absl::StrContains(proto.status().message(),
|
||||||
|
"Failed to fetch directory assets"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadDirectSucceeds) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1, 2}, {3, 4}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(4);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 4);
|
||||||
|
EXPECT_EQ(data, std::vector<char>({1, 2, 3, 4}));
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadIndirectSucceeds) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1, 2}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
AddIndirectChunks({{3}, {4, 5, 6}}, &offset,
|
||||||
|
proto_.mutable_file_indirect_chunks());
|
||||||
|
AddIndirectChunks({{7, 8, 9}}, &offset,
|
||||||
|
proto_.mutable_file_indirect_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(9);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 9);
|
||||||
|
EXPECT_EQ(data, std::vector<char>({1, 2, 3, 4, 5, 6, 7, 8, 9}));
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadIndirectOnlySucceeds) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddIndirectChunks({{1, 2}}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(2);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 2);
|
||||||
|
EXPECT_EQ(data, std::vector<char>({1, 2}));
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
EXPECT_STREQ("", asset_check_.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadWithWrongType) {
|
||||||
|
proto_.set_type(AssetProto::DIRECTORY);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(1);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(bytes_read);
|
||||||
|
EXPECT_TRUE(absl::IsInvalidArgument(bytes_read.status()));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadIndirectWithBadListIdFails) {
|
||||||
|
IndirectChunkListProto* indirect_list = proto_.add_file_indirect_chunks();
|
||||||
|
indirect_list->set_offset(0);
|
||||||
|
*indirect_list->mutable_chunk_list_id() = bad_id_;
|
||||||
|
proto_.set_file_size(1);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(1);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(bytes_read);
|
||||||
|
EXPECT_TRUE(absl::StrContains(bytes_read.status().message(),
|
||||||
|
"Failed to fetch indirect chunk list 0"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadFetchesIndirectListsLazily) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{0, 1, 2}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
AddIndirectChunks({{3}}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
AddIndirectChunks({{4, 5, 6}, {7}}, &offset,
|
||||||
|
proto_.mutable_file_indirect_chunks());
|
||||||
|
AddIndirectChunks({{8, 9}}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedFileChunkListsForTesting(), 0);
|
||||||
|
|
||||||
|
// Read direct chunks. Should not trigger indirect reads.
|
||||||
|
std::vector<char> data(10);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read = asset_.Read(0, data.data(), 3);
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedFileChunkListsForTesting(), 0);
|
||||||
|
|
||||||
|
// Read an indirect chunk near the end ({ {8, 9} }).
|
||||||
|
bytes_read = asset_.Read(8, data.data(), 1);
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedFileChunkListsForTesting(), 1);
|
||||||
|
|
||||||
|
// Read an indirect chunk in the beginning ({ {3} }).
|
||||||
|
bytes_read = asset_.Read(3, data.data(), 1);
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedFileChunkListsForTesting(), 2);
|
||||||
|
|
||||||
|
// Read an indirect chunk in the middle ({ {4, 5, 6}, {7} }).
|
||||||
|
bytes_read = asset_.Read(4, data.data(), 4);
|
||||||
|
EXPECT_EQ(asset_.GetNumFetchedFileChunkListsForTesting(), 3);
|
||||||
|
EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadEmptySucceeds) {
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
std::vector<char> data(4);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadEmptyDirectChunkSucceeds) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{}, {1}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(4);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 1);
|
||||||
|
data.resize(1);
|
||||||
|
EXPECT_EQ(data, std::vector<char>({1}));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadEmptyIndirectChunkListFails) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
AddIndirectChunks({}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
AddIndirectChunks({{}}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
AddIndirectChunks({{2}}, &offset, proto_.mutable_file_indirect_chunks());
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(4);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_OK(bytes_read);
|
||||||
|
EXPECT_EQ(*bytes_read, 2);
|
||||||
|
data.resize(2);
|
||||||
|
EXPECT_EQ(data, std::vector<char>({1, 2}));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadWithBadFileSizeFails) {
|
||||||
|
// Construct a case where the second chunk is empty, but file size indicates
|
||||||
|
// that it should be 1 byte long. Reading that byte should fail.
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1}, {}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
proto_.set_file_size(offset + 1);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(1);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(1, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(bytes_read);
|
||||||
|
EXPECT_TRUE(
|
||||||
|
absl::StrContains(bytes_read.status().message(),
|
||||||
|
"requested offset 0 is larger or equal than size 0"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadWithBadChunkIdSizeFails) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
*proto_.mutable_file_chunks(0)->mutable_chunk_id() = bad_id_;
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(1);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(bytes_read);
|
||||||
|
EXPECT_TRUE(absl::StrContains(bytes_read.status().message(),
|
||||||
|
"Failed to fetch chunk(s)"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadWithBadOffsetFails) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
AddChunks({{1, 2, 3}, {4, 5, 6}}, &offset, proto_.mutable_file_chunks());
|
||||||
|
proto_.mutable_file_chunks(1)->set_offset(4); // Instead of 3.
|
||||||
|
proto_.set_file_size(offset);
|
||||||
|
proto_.set_type(AssetProto::FILE);
|
||||||
|
|
||||||
|
asset_.Initialize(kParentIno, &store_, &proto_);
|
||||||
|
|
||||||
|
std::vector<char> data(6);
|
||||||
|
absl::StatusOr<uint64_t> bytes_read =
|
||||||
|
asset_.Read(0, data.data(), data.size());
|
||||||
|
|
||||||
|
ASSERT_NOT_OK(bytes_read);
|
||||||
|
EXPECT_TRUE(absl::StrContains(bytes_read.status().message(),
|
||||||
|
"requested size 4 at offset 0"));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(AssetTest, ReadEmptyWithBadFileSize) {
|
||||||
|
uint64_t offset = 0;
|
||||||
|
  AddChunks({}, &offset, proto_.mutable_file_chunks());
  proto_.set_file_size(1);
  proto_.set_type(AssetProto::FILE);

  asset_.Initialize(kParentIno, &store_, &proto_);

  std::vector<char> data(1);
  absl::StatusOr<uint64_t> bytes_read =
      asset_.Read(0, data.data(), data.size());

  ASSERT_NOT_OK(bytes_read);
  EXPECT_TRUE(absl::StrContains(bytes_read.status().message(),
                                "Invalid chunk ref list"));
}

TEST_F(AssetTest, ReadWithOffsetAndSizeSucceeds) {
  uint64_t offset = 0;
  AddChunks({{0, 1}, {}, {2}}, &offset, proto_.mutable_file_chunks());
  AddIndirectChunks({{3}, {4, 5, 6}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  AddIndirectChunks({}, &offset, proto_.mutable_file_indirect_chunks());
  AddIndirectChunks({{7, 8, 9}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(offset);
  proto_.set_type(AssetProto::FILE);

  asset_.Initialize(kParentIno, &store_, &proto_);

  // Test all kinds of different permutations of offsets and sizes.
  std::vector<char> expected_data;
  for (offset = 0; offset < 12; ++offset) {
    for (uint64_t size = 0; size < 12; ++size) {
      expected_data.clear();
      for (uint64_t n = offset; n < std::min<uint64_t>(offset + size, 10);
           ++n) {
        expected_data.push_back(static_cast<char>(n));
      }

      std::vector<char> data(size);
      absl::StatusOr<uint64_t> bytes_read =
          asset_.Read(offset, data.data(), data.size());

      ASSERT_OK(bytes_read);
      EXPECT_EQ(*bytes_read, expected_data.size());
      data.resize(expected_data.size());
      EXPECT_EQ(data, expected_data);
    }
  }
}

TEST_F(AssetTest, UpdateProtoWithEmptyAssetSucceeds) {
  proto_.set_type(AssetProto::DIRECTORY);
  // Put all children into the direct asset list.
  for (size_t n = 0; n < kNumChildProtos; ++n)
    *proto_.add_dir_assets() = child_protos_[n];
  asset_.Initialize(kParentIno, &store_, &proto_);
  absl::StatusOr<std::vector<const AssetProto*>> protos =
      asset_.GetAllChildProtos();
  ASSERT_OK(protos);
  ASSERT_EQ(protos->size(), kNumChildProtos);

  AssetProto proto_updated;
  proto_updated.set_type(AssetProto::DIRECTORY);
  asset_.UpdateProto(&proto_updated);
  protos = asset_.GetAllChildProtos();
  ASSERT_OK(protos);
  ASSERT_TRUE(protos->empty());
  EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
}

TEST_F(AssetTest, UpdateProtoFromEmptyAssetSucceeds) {
  AssetProto empty_proto;
  empty_proto.set_type(AssetProto::DIRECTORY);
  asset_.Initialize(kParentIno, &store_, &empty_proto);
  absl::StatusOr<std::vector<const AssetProto*>> protos =
      asset_.GetAllChildProtos();
  ASSERT_OK(protos);
  ASSERT_TRUE(protos->empty());

  proto_.set_type(AssetProto::DIRECTORY);
  // Put all children into the direct asset list.
  for (size_t n = 0; n < kNumChildProtos; ++n)
    *proto_.add_dir_assets() = child_protos_[n];
  asset_.UpdateProto(&proto_);

  protos = asset_.GetAllChildProtos();
  ASSERT_OK(protos);
  ASSERT_EQ(protos->size(), kNumChildProtos);
  EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
}

TEST_F(AssetTest, AssetProtoComparison) {
  AssetProto a;
  AssetProto b;
  EXPECT_EQ(a, b);

  a.set_type(AssetProto::DIRECTORY);
  b.set_type(AssetProto::FILE);
  EXPECT_NE(a, b);

  b.set_type(AssetProto::DIRECTORY);
  EXPECT_EQ(a, b);

  for (size_t n = 0; n < kNumChildProtos; ++n)
    *a.add_dir_assets() = child_protos_[n];
  EXPECT_NE(a, b);

  for (size_t n = 0; n < kNumChildProtos; ++n)
    *b.add_dir_assets() = child_protos_[n];
  EXPECT_EQ(a, b);
}

TEST_F(AssetTest, IsConsistentFailsFileWithDirAssets) {
  // Put all children into the direct asset list.
  for (size_t n = 0; n < kNumChildProtos; ++n)
    *proto_.add_dir_assets() = child_protos_[n];
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "File asset contains sub-assets");
}

TEST_F(AssetTest, IsConsistentFailsFileWithSymlink) {
  proto_.set_symlink_target("symlink");
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "File asset contains a symlink");
}

TEST_F(AssetTest, IsConsistentFailsDirWithFileChunks) {
  uint64_t offset = 0;
  AddChunks({{1, 2}}, &offset, proto_.mutable_file_chunks());
  proto_.set_type(AssetProto::DIRECTORY);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Directory asset contains file chunks");
}

TEST_F(AssetTest, IsConsistentFailsDirWithIndirectFileChunks) {
  uint64_t offset = 0;
  AddIndirectChunks({{3}, {4, 5, 6}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(offset);
  proto_.set_type(AssetProto::DIRECTORY);

  asset_.Initialize(kParentIno, &store_, &proto_);
  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Directory asset contains file chunks");
}

TEST_F(AssetTest, IsConsistentFailsDirWithSymlink) {
  proto_.set_symlink_target("symlink");
  proto_.set_type(AssetProto::DIRECTORY);

  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Directory asset contains a symlink");
}

TEST_F(AssetTest, IsConsistentFailsDirWithFileSize) {
  proto_.set_file_size(2);
  proto_.set_type(AssetProto::DIRECTORY);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(),
               "File size is defined for a directory asset");
}

TEST_F(AssetTest, IsConsistentFailsSymlinkWithDirAssets) {
  // Put all children into the direct asset list.
  for (size_t n = 0; n < kNumChildProtos; ++n)
    *proto_.add_dir_assets() = child_protos_[n];
  proto_.set_type(AssetProto::SYMLINK);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Symlink asset contains sub-assets");
}

TEST_F(AssetTest, IsConsistentFailsSymlinkWithIndirectFileChunks) {
  uint64_t offset = 0;
  AddIndirectChunks({{3}, {4, 5, 6}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(offset);
  proto_.set_type(AssetProto::SYMLINK);

  asset_.Initialize(kParentIno, &store_, &proto_);
  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Symlink asset contains file chunks");
}

TEST_F(AssetTest, IsConsistentFailsSymlinkWithFileSize) {
  proto_.set_file_size(2);
  proto_.set_type(AssetProto::SYMLINK);

  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(),
               "File size is defined for a symlink asset");
}

TEST_F(AssetTest, IsConsistentFailsUndefinedAssetType) {
  proto_.set_type(AssetProto::UNKNOWN);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(), "Undefined asset type");
}

TEST_F(AssetTest, IsConsistentFailsFileChunkWrongOffsets) {
  uint64_t offset = 10;
  AddChunks({{1}}, &offset, proto_.mutable_file_chunks());
  offset = 5;
  AddChunks({{2}}, &offset, proto_.mutable_file_chunks());
  proto_.set_file_size(2);
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(
      asset_check_.c_str(),
      "Disordered direct chunks: idx=1, total_offset=10, chunk_offset=5");
}

TEST_F(AssetTest, IsConsistentFailsWrongFileSize) {
  uint64_t offset = 0;
  AddChunks({{1}, {2}}, &offset, proto_.mutable_file_chunks());
  AddIndirectChunks({{3}, {4}, {5}, {6}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  AddIndirectChunks({{7}, {8}, {9}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(5);
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  std::vector<char> data(9);
  absl::StatusOr<uint64_t> bytes_read =
      asset_.Read(0, data.data(), data.size());

  ASSERT_OK(bytes_read);
  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(asset_check_.c_str(),
               "The last absolute file offset exceeds the file size: 8 >= 5");
}

TEST_F(AssetTest, IsConsistentFailsNonZeroFirstIndirectListOffset) {
  uint64_t offset = 10;
  AddIndirectChunks({{1}}, &offset, proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(1);
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(
      asset_check_.c_str(),
      "Disordered indirect chunk list: the list offset should be 0, as there "
      "are no direct file chunks: list_offset=10, previous list_offset=0");
}

TEST_F(AssetTest, IsConsistentFailsNonIncreasingIndirectListOffset) {
  uint64_t offset = 0;
  AddIndirectChunks({{1}, {2}, {3}}, &offset,
                    proto_.mutable_file_indirect_chunks());
  offset = 1;
  AddIndirectChunks({{3}}, &offset, proto_.mutable_file_indirect_chunks());
  proto_.set_file_size(3);
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);

  // Read the first indirect list to fill the internal structure.
  std::vector<char> data(3);
  absl::StatusOr<uint64_t> bytes_read =
      asset_.Read(0, data.data(), data.size());

  ASSERT_OK(bytes_read);
  EXPECT_EQ(*bytes_read, 3);

  EXPECT_FALSE(asset_.IsConsistent(&asset_check_));
  EXPECT_STREQ(
      asset_check_.c_str(),
      "Disordered indirect chunk list: the list offset should increase: "
      "list_offset=1, previous list_offset=0, total_offset=2");
}

TEST_F(AssetTest, IsConsistentEmptyFileSucceeds) {
  proto_.set_type(AssetProto::FILE);
  asset_.Initialize(kParentIno, &store_, &proto_);
  proto_.set_file_size(0);

  EXPECT_TRUE(asset_.IsConsistent(&asset_check_));
  EXPECT_TRUE(asset_check_.empty());
}

}  // namespace
}  // namespace cdc_ft
1553
cdc_fuse_fs/cdc_fuse_fs.cc
Normal file
1553
cdc_fuse_fs/cdc_fuse_fs.cc
Normal file
File diff suppressed because it is too large
86
cdc_fuse_fs/cdc_fuse_fs.h
Normal file
86
cdc_fuse_fs/cdc_fuse_fs.h
Normal file
@@ -0,0 +1,86 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_FUSE_FS_CDC_FUSE_FS_H_
#define CDC_FUSE_FS_CDC_FUSE_FS_H_

#ifndef R_OK
#define R_OK 4
#endif
#ifndef W_OK
#define W_OK 2
#endif
#ifndef X_OK
#define X_OK 1
#endif

#include <memory>

#include "absl/status/status.h"
#include "cdc_fuse_fs/config_stream_client.h"
#include "grpcpp/channel.h"
#include "manifest/manifest_proto_defs.h"

namespace cdc_ft {

class DataStoreReader;

// CdcFuse filesystem constants, exposed for testing.
namespace internal {
// Number of hardlinks is not important since the fs is read-only (I think).
constexpr int kCdcFuseDefaultNLink = 1;

// Cloudcast user and group id.
constexpr int kCdcFuseCloudcastUid = 1000;
constexpr int kCdcFuseCloudcastGid = 1000;

// Root user and group id.
constexpr int kCdcFuseRootUid = 0;
constexpr int kCdcFuseRootGid = 0;

// Default timeout after which the kernel will assume inodes are stale.
constexpr double kCdcFuseInodeTimeoutSec = 1.0;
}  // namespace internal

namespace cdc_fuse_fs {

// Initializes the CDC FUSE filesystem. Parses the command line, sets up a
// channel and a session, and optionally forks the process. For valid arguments
// see fuse_common.h.
absl::Status Initialize(int argc, char** argv);

// Starts a client to read configuration updates over gRPC |channel|.
// |instance| is the gamelet instance id.
absl::Status StartConfigClient(std::string instance,
                               std::shared_ptr<grpc::Channel> channel);

// Sets the |data_store_reader| to load data from, initializes FUSE with a
// manifest for an empty directory, and starts the filesystem. The call does
// not return until the filesystem finishes running.
// |consistency_check| defines whether FUSE consistency should be inspected
// after each manifest update.
absl::Status Run(DataStoreReader* data_store_reader, bool consistency_check);

// Releases resources. Should be called after the filesystem has finished
// running.
void Shutdown();

// Sets |manifest_id| as the CDC FUSE root.
absl::Status SetManifest(const ContentIdProto& manifest_id);

}  // namespace cdc_fuse_fs
}  // namespace cdc_ft

#endif  // CDC_FUSE_FS_CDC_FUSE_FS_H_
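Taken together with `cdc_fuse_fs/main.cc` further down in this commit, the header above implies the following call order. This is only a sketch: the `RunCdcFuse` wrapper, the port, and the instance id are invented for illustration, and error handling is reduced to the `RETURN_IF_ERROR` macro from `common/status_macros.h`.

```cpp
#include <memory>

#include "cdc_fuse_fs/cdc_fuse_fs.h"
#include "common/status_macros.h"
#include "grpcpp/grpcpp.h"

// Brings up the filesystem for a given data store reader. The address and
// instance id are placeholders; the real values come from command line flags.
absl::Status RunCdcFuse(int argc, char** argv,
                        cdc_ft::DataStoreReader* reader) {
  // Parse the FUSE arguments and set up the session.
  RETURN_IF_ERROR(cdc_ft::cdc_fuse_fs::Initialize(argc, argv));

  // Listen for manifest id updates pushed from the workstation.
  std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
      "localhost:44433", grpc::InsecureChannelCredentials());
  RETURN_IF_ERROR(
      cdc_ft::cdc_fuse_fs::StartConfigClient("instance-id", channel));

  // Blocks until the filesystem is unmounted.
  absl::Status status =
      cdc_ft::cdc_fuse_fs::Run(reader, /*consistency_check=*/false);
  cdc_ft::cdc_fuse_fs::Shutdown();
  return status;
}
```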
61
cdc_fuse_fs/cdc_fuse_fs.vcxproj
Normal file
61
cdc_fuse_fs/cdc_fuse_fs.vcxproj
Normal file
@@ -0,0 +1,61 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|GGP">
      <Configuration>Debug</Configuration>
      <Platform>GGP</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|GGP">
      <Configuration>Release</Configuration>
      <Platform>GGP</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{a537310c-0571-43d5-b7fe-c867f702294f}</ProjectGuid>
    <RootNamespace>cdc_fuse_fs</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|GGP'" Label="Configuration">
    <ConfigurationType>Makefile</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|GGP'" Label="Configuration">
    <ConfigurationType>Makefile</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|GGP'">
    <OutDir>$(SolutionDir)bazel-out\k8-dbg\bin\cdc_fuse_fs\</OutDir>
    <NMakePreprocessorDefinitions>$(NMakePreprocessorDefinitions)</NMakePreprocessorDefinitions>
    <AdditionalOptions>/std:c++17</AdditionalOptions>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|GGP'">
    <OutDir>$(SolutionDir)bazel-out\k8-opt\bin\cdc_fuse_fs\</OutDir>
    <NMakePreprocessorDefinitions>$(NMakePreprocessorDefinitions)</NMakePreprocessorDefinitions>
    <AdditionalOptions>/std:c++17</AdditionalOptions>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Label="Shared">
    <Import Project="..\all_files.vcxitems" Label="Shared" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|GGP'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|GGP'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros">
  </PropertyGroup>
  <!-- Bazel setup -->
  <PropertyGroup>
    <BazelTargets>//cdc_fuse_fs</BazelTargets>
    <BazelOutputFile>cdc_fuse_fs</BazelOutputFile>
    <BazelIncludePaths>..\;..\third_party\absl;..\third_party\blake3\c;..\third_party\googletest\googletest\include;..\third_party\protobuf\src;..\third_party\grpc\include;$(AdditionalIncludeDirectories)</BazelIncludePaths>
    <BazelSourcePathPrefix>..\/</BazelSourcePathPrefix>
  </PropertyGroup>
  <Import Project="..\NMakeBazelProject.targets" />
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
  </ImportGroup>
</Project>
2
cdc_fuse_fs/cdc_fuse_fs.vcxproj.filters
Normal file
2
cdc_fuse_fs/cdc_fuse_fs.vcxproj.filters
Normal file
@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" />
1146
cdc_fuse_fs/cdc_fuse_fs_test.cc
Normal file
1146
cdc_fuse_fs/cdc_fuse_fs_test.cc
Normal file
File diff suppressed because it is too large
122
cdc_fuse_fs/config_stream_client.cc
Normal file
122
cdc_fuse_fs/config_stream_client.cc
Normal file
@@ -0,0 +1,122 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "cdc_fuse_fs/config_stream_client.h"

#include <thread>

#include "common/grpc_status.h"
#include "common/log.h"
#include "manifest/content_id.h"

namespace cdc_ft {

using GetManifestIdRequest = proto::GetManifestIdRequest;
using GetManifestIdResponse = proto::GetManifestIdResponse;
using AckManifestIdReceivedRequest = proto::AckManifestIdReceivedRequest;
using AckManifestIdReceivedResponse = proto::AckManifestIdReceivedResponse;
using ConfigStreamService = proto::ConfigStreamService;

// Asynchronous gRPC streaming client for streaming configuration changes to
// gamelets. The client runs inside the CDC FUSE and requests updated manifests
// from the workstation.
class ManifestIdReader {
 public:
  ManifestIdReader(ConfigStreamService::Stub* stub) : stub_(stub) {}

  // Starts a GetManifestId() request and listens to the stream of manifest ids
  // sent from the workstation. Calls |callback| on every manifest id received.
  absl::Status StartListeningToManifestUpdates(
      std::function<absl::Status(const ContentIdProto&)> callback) {
    callback_ = callback;

    GetManifestIdRequest request;
    assert(!reader_);
    reader_ = stub_->GetManifestId(&context_, request);
    if (!reader_)
      return absl::UnavailableError("Failed to create manifest id reader");

    reader_thread_ =
        std::make_unique<std::thread>([this]() { ReadThreadMain(); });
    return absl::OkStatus();
  }

  // Thread that reads manifest ids from the GetManifestId() response stream.
  void ReadThreadMain() {
    GetManifestIdResponse response;
    LOG_INFO("Started manifest id reader thread")
    for (;;) {
      LOG_INFO("Waiting for manifest id update")
      if (!reader_->Read(&response)) break;

      LOG_INFO("Received new manifest id '%s'",
               ContentId::ToHexString(response.id()));
      absl::Status status = callback_(response.id());
      if (!status.ok()) {
        LOG_ERROR("Failed to execute callback for manifest update '%s': '%s'",
                  ContentId::ToHexString(response.id()), status.message());
      }
    }
    // This should happen if the server shuts down.
    LOG_INFO("Stopped manifest id reader thread")
  }

  void Shutdown() {
    if (!reader_thread_) return;

    context_.TryCancel();
    if (reader_thread_->joinable()) reader_thread_->join();
    reader_thread_.reset();
  }

 private:
  ConfigStreamService::Stub* stub_;
  grpc::ClientContext context_;
  std::unique_ptr<grpc::ClientReader<GetManifestIdResponse>> reader_;
  std::function<absl::Status(const ContentIdProto&)> callback_;
  std::unique_ptr<std::thread> reader_thread_;
};

ConfigStreamClient::ConfigStreamClient(std::string instance,
                                       std::shared_ptr<grpc::Channel> channel)
    : instance_(std::move(instance)),
      stub_(ConfigStreamService::NewStub(std::move(channel))),
      read_client_(std::make_unique<ManifestIdReader>(stub_.get())) {}

ConfigStreamClient::~ConfigStreamClient() = default;

absl::Status ConfigStreamClient::StartListeningToManifestUpdates(
    std::function<absl::Status(const ContentIdProto&)> callback) {
  LOG_INFO("Starting to listen to manifest updates");
  return read_client_->StartListeningToManifestUpdates(callback);
}

absl::Status ConfigStreamClient::SendManifestAck(ContentIdProto manifest_id) {
  AckManifestIdReceivedRequest request;
  request.set_gamelet_id(instance_);
  *request.mutable_manifest_id() = std::move(manifest_id);

  grpc::ClientContext context;
  AckManifestIdReceivedResponse response;
  RETURN_ABSL_IF_ERROR(
      stub_->AckManifestIdReceived(&context, request, &response));
  return absl::OkStatus();
}

void ConfigStreamClient::Shutdown() {
  LOG_INFO("Stopping to listen to manifest updates");
  read_client_->Shutdown();
}

}  // namespace cdc_ft
67
cdc_fuse_fs/config_stream_client.h
Normal file
67
cdc_fuse_fs/config_stream_client.h
Normal file
@@ -0,0 +1,67 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_FUSE_FS_CONFIG_STREAM_CLIENT_H_
#define CDC_FUSE_FS_CONFIG_STREAM_CLIENT_H_

#include <memory>

#include "absl/status/status.h"
#include "grpcpp/grpcpp.h"
#include "manifest/manifest_proto_defs.h"
#include "proto/asset_stream_service.grpc.pb.h"

namespace grpc_impl {
class Channel;
}

namespace cdc_ft {

class ManifestIdReader;

class ConfigStreamClient {
 public:
  // |instance| is the id of the gamelet.
  // |channel| is a gRPC channel to use.
  ConfigStreamClient(std::string instance,
                     std::shared_ptr<grpc::Channel> channel);
  ~ConfigStreamClient();

  // Sends a request to get a stream of manifest id updates. |callback| is
  // called from a background thread for every manifest id received.
  // Returns immediately without waiting for the first manifest id.
  absl::Status StartListeningToManifestUpdates(
      std::function<absl::Status(const ContentIdProto&)> callback);

  // Sends a message to indicate that the |manifest_id| was received and FUSE
  // has been updated to use the new manifest.
  absl::Status SendManifestAck(ContentIdProto manifest_id);

  // Stops listening for manifest updates.
  void Shutdown();

 private:
  using ConfigStreamService = proto::ConfigStreamService;

  const std::string instance_;
  const std::unique_ptr<ConfigStreamService::Stub> stub_;

  std::unique_ptr<ManifestIdReader> read_client_;
};

}  // namespace cdc_ft

#endif  // CDC_FUSE_FS_CONFIG_STREAM_CLIENT_H_
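The header above describes the client half of the manifest update handshake: receive a manifest id, apply it, acknowledge it. A sketch of how a caller might wire this to the `SetManifest()` entry point from `cdc_fuse_fs.h` is shown below; the real wiring lives in `cdc_fuse_fs.cc` (diff suppressed above), and the function name here is made up.

```cpp
#include "cdc_fuse_fs/cdc_fuse_fs.h"
#include "cdc_fuse_fs/config_stream_client.h"

absl::Status ListenForManifests(cdc_ft::ConfigStreamClient* client) {
  return client->StartListeningToManifestUpdates(
      [client](const cdc_ft::ContentIdProto& manifest_id) -> absl::Status {
        // Swap the FUSE root to the new manifest, then acknowledge it so the
        // workstation knows this gamelet has caught up.
        absl::Status status = cdc_ft::cdc_fuse_fs::SetManifest(manifest_id);
        if (!status.ok()) return status;
        return client->SendManifestAck(manifest_id);
      });
}
```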
33
cdc_fuse_fs/constants.h
Normal file
33
cdc_fuse_fs/constants.h
Normal file
@@ -0,0 +1,33 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_FUSE_FS_CONSTANTS_H_
#define CDC_FUSE_FS_CONSTANTS_H_

namespace cdc_ft {

// FUSE prints this to stdout when the binary timestamp and file size match the
// file on the workstation.
static constexpr char kFuseUpToDate[] = "cdc_fuse_fs is up-to-date";

// FUSE prints this to stdout when the binary timestamp or file size does not
// match the file on the workstation. It indicates that the binary has to be
// redeployed.
static constexpr char kFuseNotUpToDate[] = "cdc_fuse_fs is not up-to-date";

}  // namespace cdc_ft

#endif  // CDC_FUSE_FS_CONSTANTS_H_
202
cdc_fuse_fs/main.cc
Normal file
202
cdc_fuse_fs/main.cc
Normal file
@@ -0,0 +1,202 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl_helper/jedec_size_flag.h"
#include "cdc_fuse_fs/cdc_fuse_fs.h"
#include "cdc_fuse_fs/constants.h"
#include "common/gamelet_component.h"
#include "common/log.h"
#include "common/path.h"
#include "data_store/data_provider.h"
#include "data_store/disk_data_store.h"
#include "data_store/grpc_reader.h"
#include "grpcpp/channel.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/support/channel_arguments.h"

namespace cdc_ft {
namespace {

constexpr char kFuseFilename[] = "cdc_fuse_fs";
constexpr char kLibFuseFilename[] = "libfuse.so";

absl::StatusOr<bool> IsUpToDate(const std::string& components_arg) {
  // Components are expected to reside in the same dir as the executable.
  std::string component_dir;
  RETURN_IF_ERROR(path::GetExeDir(&component_dir));

  std::vector<GameletComponent> components =
      GameletComponent::FromCommandLineArgs(components_arg);
  std::vector<GameletComponent> our_components;
  absl::Status status =
      GameletComponent::Get({path::Join(component_dir, kFuseFilename),
                             path::Join(component_dir, kLibFuseFilename)},
                            &our_components);
  if (!status.ok() || components != our_components) {
    return false;
  }

  return true;
}

}  // namespace
}  // namespace cdc_ft

ABSL_FLAG(std::string, instance, "", "Gamelet instance id");
ABSL_FLAG(
    std::string, components, "",
    "Whitespace-separated triples filename, size and timestamp of the "
    "workstation version of this binary and dependencies. Used for a fast "
    "up-to-date check.");
ABSL_FLAG(uint16_t, port, 0, "Port to connect to on localhost");
ABSL_FLAG(cdc_ft::JedecSize, prefetch_size, cdc_ft::JedecSize(512 << 10),
          "Additional data to request from the server when a FUSE read of "
          "maximum size is detected. This amount is added to the original "
          "request. Supports common unit suffixes K, M, G");
ABSL_FLAG(std::string, cache_dir, "/var/cache/asset_streaming",
          "Cache directory to store data chunks.");
ABSL_FLAG(int, cache_dir_levels, 2,
          "Fanout of sub-directories to create within the cache directory.");
ABSL_FLAG(int, verbosity, 0, "Log verbosity");
ABSL_FLAG(bool, stats, false, "Enable statistics");
ABSL_FLAG(bool, check, false, "Execute consistency check");
ABSL_FLAG(cdc_ft::JedecSize, cache_capacity,
          cdc_ft::JedecSize(cdc_ft::DiskDataStore::kDefaultCapacity),
          "Cache capacity. Supports common unit suffixes K, M, G.");
ABSL_FLAG(uint32_t, cleanup_timeout, cdc_ft::DataProvider::kCleanupTimeoutSec,
          "Period in seconds at which instance cache cleanups are run");
ABSL_FLAG(uint32_t, access_idle_timeout, cdc_ft::DataProvider::kAccessIdleSec,
          "Do not run instance cache cleanups for this many seconds after the "
          "last file access");

static_assert(static_cast<int>(absl::StatusCode::kOk) == 0, "kOk != 0");

// Usage: cdc_fuse_fs <ABSL_FLAGs> -- mount_dir [-d|-s|..]
// Any args after -- are FUSE args, search third_party/fuse for FUSE_OPT_KEY or
// FUSE_LIB_OPT (there doesn't seem to be a place where they're all described).
int main(int argc, char* argv[]) {
  // Parse absl flags.
  std::vector<char*> mount_args = absl::ParseCommandLine(argc, argv);
  std::string instance = absl::GetFlag(FLAGS_instance);
  std::string components = absl::GetFlag(FLAGS_components);
  uint16_t port = absl::GetFlag(FLAGS_port);
  std::string cache_dir = absl::GetFlag(FLAGS_cache_dir);
  int cache_dir_levels = absl::GetFlag(FLAGS_cache_dir_levels);
  int verbosity = absl::GetFlag(FLAGS_verbosity);
  bool stats = absl::GetFlag(FLAGS_stats);
  bool consistency_check = absl::GetFlag(FLAGS_check);
  uint64_t cache_capacity = absl::GetFlag(FLAGS_cache_capacity).Size();
  unsigned int dp_cleanup_timeout = absl::GetFlag(FLAGS_cleanup_timeout);
  unsigned int dp_access_idle_timeout =
      absl::GetFlag(FLAGS_access_idle_timeout);

  // Log to console. Logs are streamed back to the workstation through the SSH
  // session.
  cdc_ft::Log::Initialize(std::make_unique<cdc_ft::ConsoleLog>(
      cdc_ft::Log::VerbosityToLogLevel(verbosity)));

  // Perform up-to-date check.
  absl::StatusOr<bool> is_up_to_date = cdc_ft::IsUpToDate(components);
  if (!is_up_to_date.ok()) {
    LOG_ERROR("Failed to check file system freshness: %s",
              is_up_to_date.status().ToString());
    return static_cast<int>(is_up_to_date.status().code());
  }
  if (!*is_up_to_date) {
    printf("%s\n", cdc_ft::kFuseNotUpToDate);
    return 0;
  }
  printf("%s\n", cdc_ft::kFuseUpToDate);
  fflush(stdout);

  // Create fs. The rest of the flags are mount flags, so pass them along.
  absl::Status status = cdc_ft::cdc_fuse_fs::Initialize(
      static_cast<int>(mount_args.size()), mount_args.data());
  if (!status.ok()) {
    LOG_ERROR("Failed to initialize file system: %s", status.ToString());
    return static_cast<int>(status.code());
  }

  // Create disk data store.
  absl::StatusOr<std::unique_ptr<cdc_ft::DiskDataStore>> store =
      cdc_ft::DiskDataStore::Create(cache_dir_levels, cache_dir, false);
  if (!store.ok()) {
    LOG_ERROR("Failed to initialize the chunk cache in directory '%s': %s",
              absl::GetFlag(FLAGS_cache_dir), store.status().ToString());
    return 1;
  }
  LOG_INFO("Setting cache capacity to '%u'", cache_capacity);
  store.value()->SetCapacity(cache_capacity);
  LOG_INFO("Caching chunks in '%s'", store.value()->RootDir());

  // Start a gRPC client.
  std::string client_address = absl::StrFormat("localhost:%u", port);
  grpc::ChannelArguments channel_args;
  channel_args.SetMaxReceiveMessageSize(-1);
  std::shared_ptr<grpc::Channel> grpc_channel = grpc::CreateCustomChannel(
      client_address, grpc::InsecureChannelCredentials(), channel_args);
  std::vector<std::unique_ptr<cdc_ft::DataStoreReader>> readers;
  readers.emplace_back(
      std::make_unique<cdc_ft::GrpcReader>(grpc_channel, stats));
  cdc_ft::GrpcReader* grpc_reader =
      static_cast<cdc_ft::GrpcReader*>(readers[0].get());

  // Send all cached content ids to the client if statistics are enabled.
  if (stats) {
    LOG_INFO("Sending all cached content ids");
    absl::StatusOr<std::vector<cdc_ft::ContentIdProto>> ids =
        store.value()->List();
    if (!ids.ok()) {
      LOG_ERROR("Failed to get all cached content ids: %s",
                ids.status().ToString());
      return 1;
    }
    status = grpc_reader->SendCachedContentIds(*ids);
    if (!status.ok()) {
      LOG_ERROR("Failed to send all cached content ids: %s", status.ToString());
      return 1;
    }
  }

  // Create data provider.
  size_t prefetch_size = absl::GetFlag(FLAGS_prefetch_size).Size();
  cdc_ft::DataProvider data_provider(std::move(*store), std::move(readers),
                                     prefetch_size, dp_cleanup_timeout,
                                     dp_access_idle_timeout);

  if (!cdc_ft::cdc_fuse_fs::StartConfigClient(instance, grpc_channel).ok()) {
    LOG_ERROR("Could not start reading configuration updates");
    return 1;
  }

  // Run FUSE.
  LOG_INFO("Running filesystem");
  status = cdc_ft::cdc_fuse_fs::Run(&data_provider, consistency_check);
  if (!status.ok()) {
    LOG_ERROR("Filesystem stopped with error: %s", status.ToString());
  }
  LOG_INFO("Filesystem ran successfully and is shutting down");

  data_provider.Shutdown();
  cdc_ft::cdc_fuse_fs::Shutdown();
  cdc_ft::Log::Shutdown();

  static_assert(static_cast<int>(absl::StatusCode::kOk) == 0, "kOk != 0");
  return static_cast<int>(status.code());
}
111
cdc_fuse_fs/mock_libfuse.cc
Normal file
111
cdc_fuse_fs/mock_libfuse.cc
Normal file
@@ -0,0 +1,111 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "cdc_fuse_fs/mock_libfuse.h"

#include <cassert>
#include <cstring>

namespace cdc_ft {
namespace {
MockLibFuse* g_fuse;
}

MockLibFuse::MockLibFuse() {
  assert(!g_fuse);
  g_fuse = this;
}

MockLibFuse::~MockLibFuse() {
  assert(g_fuse == this);
  g_fuse = nullptr;
}

void MockLibFuse::SetUid(int uid) {
  assert(g_fuse == this);
  g_fuse->context.uid = uid;
}

void MockLibFuse::SetGid(int gid) {
  assert(g_fuse == this);
  g_fuse->context.gid = gid;
}

size_t fuse_add_direntry(fuse_req_t req, char* buf, size_t bufsize,
                         const char* name, const struct stat* stbuf,
                         off_t off) {
  assert(g_fuse);
  if (bufsize >= sizeof(MockLibFuse::DirEntry)) {
    assert(stbuf);
    auto* entry = reinterpret_cast<MockLibFuse::DirEntry*>(buf);
    strncpy(entry->name, name, sizeof(entry->name));
    entry->name[sizeof(entry->name) - 1] = 0;
    entry->ino = stbuf->st_ino;
    entry->mode = stbuf->st_mode;
    entry->off = off;
  }
  return sizeof(MockLibFuse::DirEntry);
}

int fuse_reply_attr(fuse_req_t req, const struct stat* attr,
                    double attr_timeout) {
  assert(g_fuse);
  assert(attr);
  g_fuse->attrs.emplace_back(*attr, attr_timeout);
  return 0;
}

int fuse_reply_buf(fuse_req_t req, const char* buf, size_t size) {
  assert(g_fuse);
  std::vector<char> data;
  if (buf && size > 0) {
    data.insert(data.end(), buf, buf + size);
  }
  g_fuse->buffers.push_back(std::move(data));
  return 0;
}

int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param* e) {
  assert(g_fuse);
  assert(e);
  g_fuse->entries.push_back(*e);
  return 0;
}

int fuse_reply_err(fuse_req_t req, int err) {
  assert(g_fuse);
  g_fuse->errors.push_back(err);
  return 0;
}

int fuse_reply_open(fuse_req_t req, const struct fuse_file_info* fi) {
  assert(g_fuse);
  assert(fi);
  g_fuse->open_files.push_back(*fi);
  return 0;
}

void fuse_reply_none(fuse_req_t req) {
  assert(g_fuse);
  ++g_fuse->none_counter;
}

int fuse_reply_statfs(fuse_req_t req, const struct statvfs* stbuf) { return 0; }

struct fuse_context* fuse_get_context() {
  assert(g_fuse);
  return &g_fuse->context;
}

}  // namespace cdc_ft
123
cdc_fuse_fs/mock_libfuse.h
Normal file
123
cdc_fuse_fs/mock_libfuse.h
Normal file
@@ -0,0 +1,123 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_FUSE_FS_MOCK_LIBFUSE_H_
#define CDC_FUSE_FS_MOCK_LIBFUSE_H_

#include <fcntl.h>
#include <sys/stat.h>

#include <cstdint>
#include <vector>

namespace cdc_ft {

//
// The interface below mimics the part of the FUSE low level interface we need.
// See include/fuse_lowlevel.h for more information.
//

// Definitions.
using fuse_ino_t = uint64_t;
using fuse_req_t = void*;
using nlink_t = uint64_t;

constexpr fuse_ino_t FUSE_ROOT_ID = 1;
#ifndef O_DIRECT
constexpr uint32_t O_DIRECT = 040000;
#endif

struct fuse_entry_param {
  fuse_ino_t ino;
  struct stat attr;
  double attr_timeout;
  double entry_timeout;
};

struct fuse_file_info {
  int flags = O_RDONLY;
  unsigned int direct_io : 1;
  unsigned int keep_cache : 1;

  fuse_file_info() : direct_io(0), keep_cache(0) {}
  explicit fuse_file_info(int flags)
      : flags(flags), direct_io(0), keep_cache(0) {}
};

struct fuse_forget_data {
  uint64_t ino;
  uint64_t nlookup;
};

struct fuse_context {
  int uid;
  int gid;
};

struct statvfs {
  uint32_t f_bsize;
  uint32_t f_namemax;
};

// FUSE reply/action functions.
size_t fuse_add_direntry(fuse_req_t req, char* buf, size_t bufsize,
                         const char* name, const struct stat* stbuf, off_t off);
int fuse_reply_attr(fuse_req_t req, const struct stat* attr,
                    double attr_timeout);
int fuse_reply_buf(fuse_req_t req, const char* buf, size_t size);
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param* e);
int fuse_reply_err(fuse_req_t req, int err);
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info* fi);
void fuse_reply_none(fuse_req_t req);
int fuse_reply_statfs(fuse_req_t req, const struct statvfs* stbuf);
struct fuse_context* fuse_get_context();

// FUSE mocking class. Basically just a recorder for the fuse_* callbacks
// above.
struct MockLibFuse {
 public:
  MockLibFuse();
  ~MockLibFuse();

  struct Attr {
    struct stat value;
    double timeout;
    Attr(struct stat value, double timeout)
        : value(std::move(value)), timeout(timeout) {}
  };
  void SetUid(int uid);
  void SetGid(int gid);

  // Struct stored in the buffer |buf| by fuse_add_direntry().
  // Uses a maximum name size for simplicity.
  struct DirEntry {
    fuse_ino_t ino;
    uint32_t mode;
    char name[32];
    off_t off;
  };

  std::vector<fuse_entry_param> entries;
  std::vector<Attr> attrs;
  std::vector<int> errors;
  std::vector<fuse_file_info> open_files;
  std::vector<std::vector<char>> buffers;
  unsigned int none_counter = 0;
  fuse_context context;
};

}  // namespace cdc_ft

#endif  // CDC_FUSE_FS_MOCK_LIBFUSE_H_
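The recorder above replaces libfuse in unit tests: code under test calls the `fuse_reply_*` shims, and the test inspects the vectors they filled. The snippet below is a hypothetical test, not part of the commit; the test and names are invented for illustration.

```cpp
#include "cdc_fuse_fs/mock_libfuse.h"
#include "gtest/gtest.h"

namespace cdc_ft {

TEST(MockLibFuseExample, RecordsErrorReplies) {
  MockLibFuse fuse;  // Registers itself as the global mock.

  // Production code would call this via the FUSE operation handlers; here we
  // call the shim directly to show what gets recorded.
  fuse_reply_err(/*req=*/nullptr, /*err=*/2 /* ENOENT */);

  ASSERT_EQ(fuse.errors.size(), 1u);
  EXPECT_EQ(fuse.errors[0], 2);
}

}  // namespace cdc_ft
```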
35
cdc_indexer/BUILD
Normal file
35
cdc_indexer/BUILD
Normal file
@@ -0,0 +1,35 @@
package(default_visibility = ["//visibility:public"])

cc_binary(
    name = "cdc_indexer",
    srcs = ["main.cc"],
    deps = [
        ":indexer_lib",
        "//absl_helper:jedec_size_flag",
        "//common:path",
        "@com_google_absl//absl/flags:config",
        "@com_google_absl//absl/flags:flag",
        "@com_google_absl//absl/flags:parse",
        "@com_google_absl//absl/flags:usage",
        "@com_google_absl//absl/random",
        "@com_google_absl//absl/time",
    ],
)

cc_library(
    name = "indexer_lib",
    srcs = ["indexer.cc"],
    hdrs = ["indexer.h"],
    deps = [
        "//common:dir_iter",
        "//common:path",
        "//common:status_macros",
        "//fastcdc",
        "@com_github_blake3//:blake3",
        "@com_google_absl//absl/functional:bind_front",
        "@com_google_absl//absl/random",
        "@com_google_absl//absl/status",
        "@com_google_absl//absl/strings:str_format",
        "@com_google_absl//absl/time",
    ],
)
72
cdc_indexer/README.md
Normal file
72
cdc_indexer/README.md
Normal file
@@ -0,0 +1,72 @@
# CDC Indexer

This directory contains a CDC indexer based on our implementation of
[FastCDC](https://www.usenix.org/system/files/conference/atc16/atc16-paper-xia.pdf).

Run the sample with Bazel:

```
bazel run -c opt //cdc_indexer -- --inputs '/path/to/files'
```

The CDC algorithm can be tweaked with a few compile-time constants for
experimentation. See the file `indexer.h` for preprocessor macros that can be
enabled, for example:

```
bazel build -c opt --copt=-DCDC_GEAR_TABLE=1 //cdc_indexer
```

At the end of the operation, the indexer outputs a summary of the results such
as the following:

```
00:02 7.44 GB in 2 files processed at 3.1 GB/s, 50% deduplication
Operation succeeded.

Chunk size (min/avg/max): 128 KB / 256 KB / 1024 KB | Threads: 12
gear_table: 64 bit | mask_s: 0x49249249249249 | mask_l: 0x1249249249
Duration:        00:03
Total files:     2
Total chunks:    39203
Unique chunks:   20692
Total data:      9.25 GB
Unique data:     4.88 GB
Throughput:      3.07 GB/s
Avg. chunk size: 247 KB
Deduplication:   47.2%

 160 KB #########     1419 ( 7%)
 192 KB ########      1268 ( 6%)
 224 KB ###################     2996 (14%)
 256 KB ########################################     6353 (31%)
 288 KB ######################     3466 (17%)
 320 KB ##########################     4102 (20%)
 352 KB ######      946 ( 5%)
 384 KB        75 ( 0%)
 416 KB        27 ( 0%)
 448 KB         7 ( 0%)
 480 KB         5 ( 0%)
 512 KB         1 ( 0%)
 544 KB         4 ( 0%)
 576 KB         2 ( 0%)
 608 KB         3 ( 0%)
 640 KB         3 ( 0%)
 672 KB         3 ( 0%)
 704 KB         2 ( 0%)
 736 KB         0 ( 0%)
 768 KB         0 ( 0%)
 800 KB         1 ( 0%)
 832 KB         0 ( 0%)
 864 KB         0 ( 0%)
 896 KB         0 ( 0%)
 928 KB         0 ( 0%)
 960 KB         0 ( 0%)
 992 KB         0 ( 0%)
1024 KB         9 ( 0%)
```

For testing multiple combinations and comparing the results, the indexer also
features a flag `--results_file="results.csv"` which appends the raw data to the
given file in CSV format. Combine this flag with `--description` to label each
experiment with additional columns.
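The `mask_s` and `mask_l` values in the summary above feed FastCDC's two-stage boundary test. The sketch below illustrates that test in simplified form; it is based on the FastCDC paper, not on the exact `//fastcdc` implementation in this repository, and the gear table here is filled with pseudo-random values for demonstration.

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <random>

// A gear table of 256 pseudo-random 64-bit values; real implementations ship
// a fixed, precomputed table.
std::array<uint64_t, 256> MakeGearTable() {
  std::array<uint64_t, 256> table{};
  std::mt19937_64 rng(0x5eed);
  for (uint64_t& v : table) v = rng();
  return table;
}

// Returns the length of the next chunk in |data|. Below the average chunk
// size the stricter mask_s is used, so boundaries are harder to hit and tiny
// chunks stay rare; above it the looser mask_l makes a cut more likely.
size_t FindChunkBoundary(const std::array<uint64_t, 256>& gear,
                         const uint8_t* data, size_t size, size_t min_size,
                         size_t avg_size, size_t max_size, uint64_t mask_s,
                         uint64_t mask_l) {
  if (size <= min_size) return size;
  uint64_t hash = 0;
  size_t end = size < max_size ? size : max_size;
  for (size_t i = min_size; i < end; ++i) {
    hash = (hash << 1) + gear[data[i]];
    if ((hash & (i < avg_size ? mask_s : mask_l)) == 0) return i + 1;
  }
  return end;
}
```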
434
cdc_indexer/indexer.cc
Normal file
434
cdc_indexer/indexer.cc
Normal file
@@ -0,0 +1,434 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_indexer/indexer.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
|
#include <mutex>
|
||||||
|
#include <queue>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "absl/functional/bind_front.h"
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "absl/time/clock.h"
|
||||||
|
#include "blake3.h"
|
||||||
|
#include "common/dir_iter.h"
|
||||||
|
#include "common/errno_mapping.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
struct IndexerJob {
|
||||||
|
std::string filepath;
|
||||||
|
};
|
||||||
|
|
||||||
|
class Indexer::Impl {
|
||||||
|
public:
|
||||||
|
Impl(const IndexerConfig& cfg, const std::vector<std::string>& inputs);
|
||||||
|
const IndexerConfig& Config() const;
|
||||||
|
|
||||||
|
// Calls the given `progress` function periodically until `SetDone(true)` is
|
||||||
|
// called.
|
||||||
|
void TriggerProgress(ProgressFn fn);
|
||||||
|
bool GetNextJob(IndexerJob* job);
|
||||||
|
|
||||||
|
bool HasError() const;
|
||||||
|
absl::Status Error() const;
|
||||||
|
void SetError(absl::Status err);
|
||||||
|
|
||||||
|
void SetDone(bool done);
|
||||||
|
|
||||||
|
inline const IndexerConfig& Cfg() const { return cfg_; }
|
||||||
|
inline Indexer::OpStats Stats() const;
|
||||||
|
inline Indexer::ChunkSizeMap ChunkSizes() const;
|
||||||
|
void AddChunk(const uint8_t* data, size_t len);
|
||||||
|
void AddFile();
|
||||||
|
|
||||||
|
private:
|
||||||
|
friend class Indexer;
|
||||||
|
// Calculates a hash value for the given data.
|
||||||
|
inline hash_t Hash(const uint8_t* data, size_t len);
|
||||||
|
inline hash_t HashBlake3(const uint8_t* data, size_t len);
|
||||||
|
inline hash_t HashXxhash(const uint8_t* data, size_t len);
|
||||||
|
// Finds the smallest power of 2 such that the result is <= size. If size is >
|
||||||
|
// 2^31, then UINT64_MAX is returned.
|
||||||
|
inline size_t SizeBucket(size_t size) const;
|
||||||
|
|
||||||
|
IndexerConfig cfg_;
|
||||||
|
bool done_;
|
||||||
|
// The following members are all guarded by jobs_mutex_.
|
||||||
|
std::queue<std::string> inputs_;
|
||||||
|
DirectoryIterator dir_iter_;
|
||||||
|
std::mutex jobs_mutex_;
|
||||||
|
// Guarded by chunks_mutex_
|
||||||
|
Indexer::ChunkMap chunks_;
|
||||||
|
std::mutex chunks_mutex_;
|
||||||
|
// Guarded by stats_mutex_.
|
||||||
|
Indexer::OpStats stats_;
|
||||||
|
mutable std::mutex stats_mutex_;
|
||||||
|
// Guarded by chunk_sizes_mutex_;
|
||||||
|
Indexer::ChunkSizeMap chunk_sizes_;
|
||||||
|
mutable std::mutex chunk_sizes_mutex_;
|
||||||
|
// Guarded by result_mutex_
|
||||||
|
absl::Status result_;
|
||||||
|
mutable std::mutex result_mutex_;
|
||||||
|
};
|
||||||
|
|
||||||
|
class Indexer::Worker {
|
||||||
|
public:
|
||||||
|
Worker(Impl* impl);
|
||||||
|
void Run();
|
||||||
|
|
||||||
|
private:
|
||||||
|
absl::Status IndexFile(const std::string& filepath);
|
||||||
|
|
||||||
|
Impl* impl_;
|
||||||
|
absl::Cord buf_;
|
||||||
|
const fastcdc::Config cdc_cfg_;
|
||||||
|
};
|
||||||
|
|
||||||
|
// This class holds a `Worker` object and the associated `std::thread` object
|
||||||
|
// that executes it.
|
||||||
|
class Indexer::WorkerThread {
|
||||||
|
public:
|
||||||
|
WorkerThread() : worker(nullptr), thrd(nullptr) {}
|
||||||
|
~WorkerThread() {
|
||||||
|
if (thrd) {
|
||||||
|
if (thrd->joinable()) thrd->join();
|
||||||
|
delete thrd;
|
||||||
|
}
|
||||||
|
if (worker) {
|
||||||
|
delete worker;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Worker* worker;
|
||||||
|
std::thread* thrd;
|
||||||
|
};
|
||||||
|
|
||||||
|
Indexer::Impl::Impl(const IndexerConfig& cfg,
|
||||||
|
const std::vector<std::string>& inputs)
|
||||||
|
: cfg_(cfg), done_(false) {
|
||||||
|
// Perform some sanity checks on the config.
|
||||||
|
if (cfg_.num_threads == 0)
|
||||||
|
cfg_.num_threads = std::thread::hardware_concurrency();
|
||||||
|
if (cfg_.read_block_size == 0) cfg_.read_block_size = 4 << 10;
|
||||||
|
if (cfg_.avg_chunk_size == 0) cfg_.avg_chunk_size = 256 << 10;
|
||||||
|
if (cfg_.min_chunk_size == 0 || cfg_.min_chunk_size > cfg_.avg_chunk_size)
|
||||||
|
cfg_.min_chunk_size = cfg_.avg_chunk_size >> 1;
|
||||||
|
if (cfg_.max_chunk_size == 0 || cfg_.max_chunk_size < cfg_.avg_chunk_size)
|
||||||
|
cfg_.max_chunk_size = cfg_.avg_chunk_size << 1;
|
||||||
|
if (cfg_.max_chunk_size_step == 0)
|
||||||
|
cfg_.max_chunk_size_step =
|
||||||
|
cfg_.min_chunk_size > 0 ? cfg_.min_chunk_size : 128u;
|
||||||
|
// Populate the CDC bitmasks which the Chunker creates. Only done here for
|
||||||
|
// being able to write it to the output, setting them in the IndexerConfig has
|
||||||
|
// no effect.
|
||||||
|
fastcdc::Config ccfg(cfg_.min_chunk_size, cfg_.avg_chunk_size,
|
||||||
|
cfg_.max_chunk_size);
|
||||||
|
Indexer::Chunker chunker(ccfg, nullptr);
|
||||||
|
cfg_.mask_s = chunker.Stage(0).mask;
|
||||||
|
cfg_.mask_l = chunker.Stage(chunker.StagesCount() - 1).mask;
|
||||||
|
// Collect inputs.
|
||||||
|
for (auto it = inputs.begin(); it != inputs.end(); ++it) {
|
||||||
|
inputs_.push(*it);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const IndexerConfig& Indexer::Impl::Config() const { return cfg_; }
|
||||||
|
|
||||||
|
// Executes the `progress` function in a loop, approximately every 200ms. Call
|
||||||
|
// `SetDone(true)` to stop this function.
|
||||||
|
void Indexer::Impl::TriggerProgress(Indexer::ProgressFn fn) {
|
||||||
|
if (!fn) return;
|
||||||
|
const int64_t interval = 200;
|
||||||
|
absl::Time started = absl::Now();
|
||||||
|
// Keeping going until we're done or an error occured.
|
||||||
|
while (!done_ && !HasError()) {
|
||||||
|
absl::Time loop_started = absl::Now();
|
||||||
|
stats_mutex_.lock();
|
||||||
|
stats_.elapsed = loop_started - started;
|
||||||
|
stats_mutex_.unlock();
|
||||||
|
|
||||||
|
fn(Stats());
|
||||||
|
// Aim for one update every interval.
|
||||||
|
auto loop_elapsed = absl::ToInt64Milliseconds(loop_started - absl::Now());
|
||||||
|
if (loop_elapsed < interval)
|
||||||
|
std::this_thread::sleep_for(
|
||||||
|
std::chrono::milliseconds(interval - loop_elapsed));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Indexer::Impl::GetNextJob(IndexerJob* job) {
|
||||||
|
// Stop if an error occurred.
|
||||||
|
if (HasError()) return false;
|
||||||
|
const std::lock_guard<std::mutex> lock(jobs_mutex_);
|
||||||
|
|
||||||
|
DirectoryEntry dent;
|
||||||
|
while (!dent.Valid()) {
|
||||||
|
// Open the next directory, if needed.
|
||||||
|
if (!dir_iter_.Valid()) {
|
||||||
|
if (inputs_.empty()) {
|
||||||
|
// We are done.
|
||||||
|
return false;
|
||||||
|
} else {
|
||||||
|
std::string input = inputs_.front();
|
||||||
|
std::string uinput = path::ToUnix(input);
|
||||||
|
inputs_.pop();
|
||||||
|
// Return files as jobs.
|
||||||
|
if (path::FileExists(uinput)) {
|
||||||
|
job->filepath = uinput;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// Otherwise read the directory.
|
||||||
|
if (!dir_iter_.Open(input, DirectorySearchFlags::kFiles)) {
|
||||||
|
// Ignore permission errors.
|
||||||
|
if (absl::IsPermissionDenied(dir_iter_.Status())) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (!dir_iter_.Status().ok()) {
|
||||||
|
SetError(dir_iter_.Status());
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (dir_iter_.NextEntry(&dent)) {
|
||||||
|
break;
|
||||||
|
} else if (!dir_iter_.Status().ok()) {
|
||||||
|
SetError(dir_iter_.Status());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
path::Join(&job->filepath, dir_iter_.Path(), dent.RelPathName());
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Indexer::Impl::SetDone(bool done) { done_ = done; }
|
||||||
|
|
||||||
|
inline size_t Indexer::Impl::SizeBucket(size_t size) const {
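// Illustrative example (hypothetical config values): with min_chunk_size =
// 128 KiB and max_chunk_size_step = 64 KiB, a 5 KiB chunk falls into the
// 8 KiB bucket (power-of-two steps below the minimum), while a 150 KiB chunk
// falls into the 192 KiB bucket (128 KiB + one 64 KiB step).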
|
||||||
|
size_t bucket = 1024;
|
||||||
|
// Go in steps of powers of two until min. chunk size is reached.
|
||||||
|
while (bucket < size && bucket < cfg_.min_chunk_size && bucket < (1llu << 63))
|
||||||
|
bucket <<= 1;
|
||||||
|
// Go in steps of the configurable step size afterwards.
|
||||||
|
while (bucket < size && bucket < (1llu << 63))
|
||||||
|
bucket += cfg_.max_chunk_size_step;
|
||||||
|
return bucket >= size ? bucket : UINT64_MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline Indexer::OpStats Indexer::Impl::Stats() const {
|
||||||
|
const std::lock_guard<std::mutex> lock(stats_mutex_);
|
||||||
|
return stats_;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline Indexer::ChunkSizeMap Indexer::Impl::ChunkSizes() const {
|
||||||
|
const std::lock_guard<std::mutex> lock(chunk_sizes_mutex_);
|
||||||
|
return chunk_sizes_;
|
||||||
|
}
|
||||||
|
|
||||||
|
Indexer::hash_t Indexer::Impl::HashBlake3(const uint8_t* data, size_t len) {
|
||||||
|
blake3_hasher state;
|
||||||
|
uint8_t out[BLAKE3_OUT_LEN];
|
||||||
|
blake3_hasher_init(&state);
|
||||||
|
blake3_hasher_update(&state, data, len);
|
||||||
|
blake3_hasher_finalize(&state, out, BLAKE3_OUT_LEN);
|
||||||
|
return Indexer::hash_t(reinterpret_cast<const char*>(out), BLAKE3_OUT_LEN);
|
||||||
|
}
|
||||||
|
|
||||||
|
Indexer::hash_t Indexer::Impl::Hash(const uint8_t* data, size_t len) {
|
||||||
|
switch (cfg_.hash_type) {
|
||||||
|
case IndexerConfig::HashType::kNull:
|
||||||
|
return hash_t();
|
||||||
|
case IndexerConfig::HashType::kBlake3:
|
||||||
|
return HashBlake3(data, len);
|
||||||
|
case IndexerConfig::HashType::kUndefined:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
std::cerr << "Unknown hash type" << std::endl;
|
||||||
|
return std::string();
|
||||||
|
}
|
||||||
|
|
||||||
|
void Indexer::Impl::AddChunk(const uint8_t* data, size_t len) {
|
||||||
|
std::string hash = Hash(data, len);
|
||||||
|
// See if the chunk already exists, insert it if not.
|
||||||
|
chunks_mutex_.lock();
|
||||||
|
bool new_chunk = chunks_.find(hash) == chunks_.end();
|
||||||
|
if (new_chunk) {
|
||||||
|
chunks_.emplace(hash, Chunk{hash, len});
|
||||||
|
}
|
||||||
|
chunks_mutex_.unlock();
|
||||||
|
|
||||||
|
// Update the stats.
|
||||||
|
stats_mutex_.lock();
|
||||||
|
stats_.total_bytes += len;
|
||||||
|
++stats_.total_chunks;
|
||||||
|
if (new_chunk) {
|
||||||
|
stats_.unique_bytes += len;
|
||||||
|
++stats_.unique_chunks;
|
||||||
|
}
|
||||||
|
stats_mutex_.unlock();
|
||||||
|
|
||||||
|
// Update chunk sizes distribution.
|
||||||
|
if (new_chunk) {
|
||||||
|
size_t bucket = SizeBucket(len);
|
||||||
|
chunk_sizes_mutex_.lock();
|
||||||
|
chunk_sizes_[bucket]++;
|
||||||
|
chunk_sizes_mutex_.unlock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Indexer::Impl::AddFile() {
|
||||||
|
const std::lock_guard<std::mutex> lock(stats_mutex_);
|
||||||
|
++stats_.total_files;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Indexer::Impl::HasError() const {
|
||||||
|
const std::lock_guard<std::mutex> lock(result_mutex_);
|
||||||
|
return !result_.ok();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Indexer::Impl::Error() const {
|
||||||
|
const std::lock_guard<std::mutex> lock(result_mutex_);
|
||||||
|
return result_;
|
||||||
|
}
|
||||||
|
|
||||||
|
void Indexer::Impl::SetError(absl::Status err) {
|
||||||
|
// Ignore attempts to set a non-error.
|
||||||
|
if (err.ok()) return;
|
||||||
|
const std::lock_guard<std::mutex> lock(result_mutex_);
|
||||||
|
// Don't overwrite any previous error.
|
||||||
|
if (result_.ok()) result_ = err;
|
||||||
|
}
|
||||||
|
|
||||||
|
Indexer::Worker::Worker(Indexer::Impl* impl)
|
||||||
|
: impl_(impl),
|
||||||
|
cdc_cfg_(impl_->Cfg().min_chunk_size, impl_->Cfg().avg_chunk_size,
|
||||||
|
impl_->Cfg().max_chunk_size) {}
|
||||||
|
|
||||||
|
void Indexer::Worker::Run() {
|
||||||
|
IndexerJob job;
|
||||||
|
while (impl_->GetNextJob(&job)) {
|
||||||
|
absl::Status err = IndexFile(job.filepath);
|
||||||
|
if (!err.ok()) {
|
||||||
|
impl_->SetError(err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Indexer::Worker::IndexFile(const std::string& filepath) {
|
||||||
|
std::FILE* fin = std::fopen(filepath.c_str(), "rb");
|
||||||
|
if (!fin) {
|
||||||
|
return ErrnoToCanonicalStatus(
|
||||||
|
errno, absl::StrFormat("failed to open file '%s'", filepath));
|
||||||
|
}
|
||||||
|
path::FileCloser closer(fin);
|
||||||
|
std::fseek(fin, 0, SEEK_SET);
|
||||||
|
|
||||||
|
auto hdlr = absl::bind_front(&Indexer::Impl::AddChunk, impl_);
|
||||||
|
Indexer::Chunker chunker(cdc_cfg_, hdlr);
|
||||||
|
|
||||||
|
std::vector<uint8_t> buf(impl_->Cfg().read_block_size, 0);
|
||||||
|
int err = 0;
|
||||||
|
while (!std::feof(fin)) {
|
||||||
|
size_t cnt = std::fread(buf.data(), sizeof(uint8_t), buf.size(), fin);
|
||||||
|
err = std::ferror(fin);
|
||||||
|
if (err) {
|
||||||
|
return ErrnoToCanonicalStatus(
|
||||||
|
err, absl::StrFormat("failed to read from file '%s'", filepath));
|
||||||
|
}
|
||||||
|
if (cnt) {
|
||||||
|
chunker.Process(buf.data(), cnt);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chunker.Finalize();
|
||||||
|
impl_->AddFile();
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
IndexerConfig::IndexerConfig()
|
||||||
|
: read_block_size(32 << 10),
|
||||||
|
min_chunk_size(0),
|
||||||
|
avg_chunk_size(0),
|
||||||
|
max_chunk_size(0),
|
||||||
|
max_chunk_size_step(0),
|
||||||
|
num_threads(0),
|
||||||
|
mask_s(0),
|
||||||
|
mask_l(0) {}
|
||||||
|
|
||||||
|
Indexer::Indexer() : impl_(nullptr) {}
|
||||||
|
|
||||||
|
Indexer::~Indexer() {
|
||||||
|
if (impl_) delete impl_;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Indexer::Run(const IndexerConfig& cfg,
|
||||||
|
const std::vector<std::string>& inputs,
|
||||||
|
Indexer::ProgressFn fn) {
|
||||||
|
if (impl_) delete impl_;
|
||||||
|
impl_ = new Impl(cfg, inputs);
|
||||||
|
|
||||||
|
// Start the file creation workers.
|
||||||
|
std::vector<WorkerThread> workers(impl_->Config().num_threads);
|
||||||
|
for (auto it = workers.begin(); it != workers.end(); ++it) {
|
||||||
|
auto worker = new Worker(impl_);
|
||||||
|
it->worker = worker;
|
||||||
|
it->thrd = new std::thread(&Worker::Run, worker);
|
||||||
|
}
|
||||||
|
// Start the progress function worker.
|
||||||
|
std::thread prog(&Impl::TriggerProgress, impl_, fn);
|
||||||
|
|
||||||
|
// Wait for the workers to finish.
|
||||||
|
for (auto it = workers.begin(); it != workers.end(); ++it) {
|
||||||
|
it->thrd->join();
|
||||||
|
}
|
||||||
|
// Wait for the progress worker to finish.
|
||||||
|
impl_->SetDone(true);
|
||||||
|
prog.join();
|
||||||
|
|
||||||
|
return Error();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status Indexer::Error() const {
|
||||||
|
return impl_ ? impl_->Error() : absl::Status();
|
||||||
|
}
|
||||||
|
|
||||||
|
IndexerConfig Indexer::Config() const {
|
||||||
|
if (impl_) return impl_->Cfg();
|
||||||
|
return IndexerConfig();
|
||||||
|
}
|
||||||
|
|
||||||
|
Indexer::OpStats Indexer::Stats() const {
|
||||||
|
if (impl_) return impl_->Stats();
|
||||||
|
return OpStats();
|
||||||
|
}
|
||||||
|
|
||||||
|
Indexer::ChunkSizeMap Indexer::ChunkSizes() const {
|
||||||
|
if (impl_) return impl_->ChunkSizes();
|
||||||
|
return Indexer::ChunkSizeMap();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline Indexer::OpStats::OpStats()
|
||||||
|
: total_files(0),
|
||||||
|
total_chunks(0),
|
||||||
|
unique_chunks(0),
|
||||||
|
total_bytes(0),
|
||||||
|
unique_bytes(0) {}
|
||||||
|
|
||||||
|
}  // namespace cdc_ft
|
||||||
145
cdc_indexer/indexer.h
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_INDEXER_INDEXER_H_
|
||||||
|
#define CDC_INDEXER_INDEXER_H_
|
||||||
|
|
||||||
|
#include <mutex>
|
||||||
|
#include <string>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/time/time.h"
|
||||||
|
#include "fastcdc/fastcdc.h"
|
||||||
|
|
||||||
|
// Compile-time parameters for the FastCDC algorithm.
|
||||||
|
#define CDC_GEAR_32BIT 1
|
||||||
|
#define CDC_GEAR_64BIT 2
|
||||||
|
#ifndef CDC_GEAR_TABLE
|
||||||
|
#define CDC_GEAR_TABLE CDC_GEAR_64BIT
|
||||||
|
#endif
|
||||||
|
#ifndef CDC_MASK_STAGES
|
||||||
|
#define CDC_MASK_STAGES 7
|
||||||
|
#endif
|
||||||
|
#ifndef CDC_MASK_BIT_LSHIFT_AMOUNT
|
||||||
|
#define CDC_MASK_BIT_LSHIFT_AMOUNT 3
|
||||||
|
#endif
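// These defaults can be overridden at build time by defining the macros on the
// compiler command line (illustrative; exact flag syntax depends on the
// toolchain), e.g. defining CDC_GEAR_TABLE=1 to select the 32-bit gear table.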
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
struct IndexerConfig {
|
||||||
|
// The hash function to use.
|
||||||
|
enum class HashType {
|
||||||
|
kUndefined = 0,
|
||||||
|
// No hashing performed, always return an empty string.
|
||||||
|
kNull,
|
||||||
|
// Use BLAKE3 (cryptographic)
|
||||||
|
kBlake3,
|
||||||
|
};
|
||||||
|
IndexerConfig();
|
||||||
|
// Read file contents in the given block size from disk, defaults to 32K.
|
||||||
|
size_t read_block_size;
|
||||||
|
// The minimum allowed chunk size, defaults to avg_chunk_size/2.
|
||||||
|
size_t min_chunk_size;
|
||||||
|
// The target average chunk size.
|
||||||
|
size_t avg_chunk_size;
|
||||||
|
// The maximum allowed chunk size, defaults to 2*avg_chunk_size.
|
||||||
|
size_t max_chunk_size;
|
||||||
|
// Max. step size for bucketing the chunk size distribution.
|
||||||
|
size_t max_chunk_size_step;
|
||||||
|
// How many operations to run in parallel. If this value is zero, then
|
||||||
|
// `std::thread::hardware_concurrency()` is used.
|
||||||
|
uint32_t num_threads;
|
||||||
|
// Which hash function to use.
|
||||||
|
HashType hash_type;
|
||||||
|
// The masks will be populated by the indexer, setting them here has no
|
||||||
|
// effect. They are in this struct so that they can be conveniently accessed
|
||||||
|
// when printing the operation summary (and since they are derived from the
|
||||||
|
// configuration, they are technically part of it).
|
||||||
|
uint64_t mask_s;
|
||||||
|
uint64_t mask_l;
|
||||||
|
};
|
||||||
|
|
||||||
|
class Indexer {
|
||||||
|
public:
|
||||||
|
using hash_t = std::string;
|
||||||
|
#if CDC_GEAR_TABLE == CDC_GEAR_32BIT
|
||||||
|
typedef fastcdc::Chunker32<CDC_MASK_STAGES, CDC_MASK_BIT_LSHIFT_AMOUNT>
|
||||||
|
Chunker;
|
||||||
|
#elif CDC_GEAR_TABLE == CDC_GEAR_64BIT
|
||||||
|
typedef fastcdc::Chunker64<CDC_MASK_STAGES, CDC_MASK_BIT_LSHIFT_AMOUNT>
|
||||||
|
Chunker;
|
||||||
|
#else
|
||||||
|
#error "Unknown gear table"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Represents a chunk.
|
||||||
|
struct Chunk {
|
||||||
|
hash_t hash;
|
||||||
|
size_t size;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Chunk storage, keyed by hash. The hash value must be mapped to a uint64_t
|
||||||
|
// value here, which is only acceptable for an experimental program like this.
|
||||||
|
typedef std::unordered_map<hash_t, Chunk> ChunkMap;
|
||||||
|
// Used for counting number of chunks in size buckets.
|
||||||
|
typedef std::unordered_map<size_t, uint64_t> ChunkSizeMap;
|
||||||
|
|
||||||
|
// Statistics about the current operation.
|
||||||
|
struct OpStats {
|
||||||
|
OpStats();
|
||||||
|
size_t total_files;
|
||||||
|
size_t total_chunks;
|
||||||
|
size_t unique_chunks;
|
||||||
|
size_t total_bytes;
|
||||||
|
size_t unique_bytes;
|
||||||
|
absl::Duration elapsed;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Defines a callback function that can be used to display progress updates
|
||||||
|
// while the Indexer is busy.
|
||||||
|
typedef void(ProgressFn)(const OpStats& stats);
|
||||||
|
|
||||||
|
Indexer();
|
||||||
|
~Indexer();
|
||||||
|
|
||||||
|
// Starts the indexing operation for the given configuration `cfg` and
|
||||||
|
// `inputs`. The optional callback function `fn` is called periodically with
|
||||||
|
// statistics about the ongoing operation.
|
||||||
|
absl::Status Run(const IndexerConfig& cfg,
|
||||||
|
const std::vector<std::string>& inputs, ProgressFn fn);
|
||||||
|
// Returns the status of the ongoing or completed operation.
|
||||||
|
absl::Status Error() const;
|
||||||
|
// Returns the configuration that was passed to Run().
|
||||||
|
IndexerConfig Config() const;
|
||||||
|
// Returns the statistics about the ongoing or completed operation.
|
||||||
|
OpStats Stats() const;
|
||||||
|
// Returns a map of chunk sizes to the number of occurrences. The sizes are
|
||||||
|
// combined to buckets according to the given `IndexerConfig` of the Run()
|
||||||
|
// operation.
|
||||||
|
ChunkSizeMap ChunkSizes() const;
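// Minimal usage sketch (illustrative; values and path are hypothetical):
//   IndexerConfig cfg;
//   cfg.avg_chunk_size = 256 << 10;
//   cfg.hash_type = IndexerConfig::HashType::kBlake3;
//   Indexer idx;
//   absl::Status status = idx.Run(cfg, {"/data/assets"}, nullptr);
//   if (status.ok()) Indexer::OpStats stats = idx.Stats();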
|
||||||
|
|
||||||
|
private:
|
||||||
|
class Impl;
|
||||||
|
class Worker;
|
||||||
|
class WorkerThread;
|
||||||
|
Impl* impl_;
|
||||||
|
};
|
||||||
|
|
||||||
|
}  // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_INDEXER_INDEXER_H_
|
||||||
435
cdc_indexer/main.cc
Normal file
@@ -0,0 +1,435 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
#include "absl/flags/flag.h"
|
||||||
|
#include "absl/flags/parse.h"
|
||||||
|
#include "absl/flags/usage.h"
|
||||||
|
#include "absl/flags/usage_config.h"
|
||||||
|
#include "absl/random/random.h"
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/strings/match.h"
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "absl_helper/jedec_size_flag.h"
|
||||||
|
#include "cdc_indexer/indexer.h"
|
||||||
|
#include "common/errno_mapping.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
|
||||||
|
ABSL_FLAG(std::vector<std::string>, inputs, std::vector<std::string>(),
|
||||||
|
"List of input files or directory to read from.");
|
||||||
|
ABSL_FLAG(uint32_t, num_threads, 0,
|
||||||
|
"How many threads should read files in parallel, use 0 to "
|
||||||
|
"auto-dertermine the best concurrency for this machine.");
|
||||||
|
ABSL_FLAG(cdc_ft::JedecSize, min_chunk_size, cdc_ft::JedecSize(0),
|
||||||
|
"The minimum chunk size to size the files into. Defaults to half of "
|
||||||
|
"the average chunk size. Supports common unit suffixes K, M, G.");
|
||||||
|
ABSL_FLAG(cdc_ft::JedecSize, avg_chunk_size, cdc_ft::JedecSize(256 << 10),
|
||||||
|
"The average chunk size to size the files into. Supports common "
|
||||||
|
"unit suffixes K, M, G.");
|
||||||
|
ABSL_FLAG(cdc_ft::JedecSize, max_chunk_size, cdc_ft::JedecSize(0),
|
||||||
|
"The maximum chunk size to size the files into. Defaults to twice "
|
||||||
|
"the average chunk size. Supports common unit suffixes K, M, G.");
|
||||||
|
ABSL_FLAG(cdc_ft::JedecSize, read_block_size, cdc_ft::JedecSize(0),
|
||||||
|
"The block size to read the input file(s) from disk. Defaults to the "
|
||||||
|
"value of --max_chunk_size. Supports common unit suffixes K, M, G.");
|
||||||
|
ABSL_FLAG(std::string, hash, "blake3",
|
||||||
|
"Which hash function to use. Supported values are \"blake3\" and "
|
||||||
|
"\"null\".");
|
||||||
|
ABSL_FLAG(std::string, results_file, "",
|
||||||
|
"File name to append results to in CVS format.");
|
||||||
|
ABSL_FLAG(std::string, description, "",
|
||||||
|
"A descriptive string of the experiment that was run. If given, this "
|
||||||
|
"will be prepended literally to the results_file. Multiple columns "
|
||||||
|
"can be separated with commas.");
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
const char* GearTable() {
|
||||||
|
// The following macros are defined in indexer.h.
|
||||||
|
#if CDC_GEAR_TABLE == CDC_GEAR_32BIT
|
||||||
|
return "32 bit";
|
||||||
|
#elif CDC_GEAR_TABLE == CDC_GEAR_64BIT
|
||||||
|
return "64 bit";
|
||||||
|
#else
|
||||||
|
#error "Unknown gear table"
|
||||||
|
return "unknown";
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetupFlagsHelp() {
|
||||||
|
absl::SetProgramUsageMessage(
|
||||||
|
"CDC indexer to measure and report data redundancy.");
|
||||||
|
absl::FlagsUsageConfig fuc;
|
||||||
|
// Filter flags to show when the --help flag is set.
|
||||||
|
fuc.contains_help_flags = [](absl::string_view f) {
|
||||||
|
return absl::EndsWith(f, "main.cc");
|
||||||
|
};
|
||||||
|
absl::SetFlagsUsageConfig(fuc);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prints a human-readable representation of the given size, such as "4 KB".
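// For example (illustrative): HumanBytes(512) returns "512 bytes" and
// HumanBytes(5 << 20, 1) returns "5.0 MB".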
|
||||||
|
template <typename T>
|
||||||
|
std::string HumanBytes(T size, int precision = 0) {
|
||||||
|
const size_t threshold = 2048;
|
||||||
|
if (size < 1024)
|
||||||
|
return absl::StrFormat("%d bytes", static_cast<size_t>(size));
|
||||||
|
double s = static_cast<double>(size) / 1024;
|
||||||
|
std::string units = "KB";
|
||||||
|
if (s > threshold) {
|
||||||
|
s /= 1024;
|
||||||
|
units = "MB";
|
||||||
|
}
|
||||||
|
if (s > threshold) {
|
||||||
|
s /= 1024;
|
||||||
|
units = "GB";
|
||||||
|
}
|
||||||
|
if (s > threshold) {
|
||||||
|
s /= 1024;
|
||||||
|
units = "TB";
|
||||||
|
}
|
||||||
|
if (s > threshold) {
|
||||||
|
s /= 1024;
|
||||||
|
units = "PB";
|
||||||
|
}
|
||||||
|
return absl::StrFormat("%.*f %s", precision, s, units);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prints a human-readable representation of a duration as minutes and seconds
|
||||||
|
// in the format "m:ss".
|
||||||
|
std::string HumanDuration(const absl::Duration& d) {
|
||||||
|
auto sec = absl::ToInt64Seconds(d);
|
||||||
|
return absl::StrFormat("%02d:%02d", sec / 60, std::abs(sec) % 60);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string HashTypeToString(IndexerConfig::HashType type) {
|
||||||
|
switch (type) {
|
||||||
|
case IndexerConfig::HashType::kNull:
|
||||||
|
return "(no hashing)";
|
||||||
|
case IndexerConfig::HashType::kBlake3:
|
||||||
|
return "BLAKE3";
|
||||||
|
default:
|
||||||
|
return "unknown";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prints progress information on stdout.
|
||||||
|
void ShowProgress(const Indexer::OpStats& stats) {
|
||||||
|
static absl::Time op_start = absl::Now();
|
||||||
|
static absl::Time last_progress = op_start;
|
||||||
|
static size_t last_total_bytes = 0;
|
||||||
|
|
||||||
|
auto now = absl::Now();
|
||||||
|
auto elapsed = now - last_progress;
|
||||||
|
if (elapsed < absl::Milliseconds(500)) return;
|
||||||
|
|
||||||
|
double bps =
|
||||||
|
(stats.total_bytes - last_total_bytes) / absl::ToDoubleSeconds(elapsed);
|
||||||
|
double dedup_pct = (stats.total_bytes - stats.unique_bytes) /
|
||||||
|
static_cast<double>(stats.total_bytes) * 100.0;
|
||||||
|
std::cout << '\r' << HumanDuration(now - op_start) << " " << std::setw(2)
|
||||||
|
<< HumanBytes(stats.total_bytes, 2) << " in " << stats.total_files
|
||||||
|
<< " files processed at " << HumanBytes(bps, 1) << "/s"
|
||||||
|
<< ", " << static_cast<int>(dedup_pct) << "% deduplication"
|
||||||
|
<< std::flush;
|
||||||
|
last_progress = now;
|
||||||
|
last_total_bytes = stats.total_bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ShowSummary(const IndexerConfig& cfg, const Indexer::OpStats& stats,
|
||||||
|
absl::Duration elapsed) {
|
||||||
|
const int title_w = 20;
|
||||||
|
const int num_w = 16;
|
||||||
|
double dedup_pct = (stats.total_bytes - stats.unique_bytes) /
|
||||||
|
static_cast<double>(stats.total_bytes) * 100.0;
|
||||||
|
double bps = stats.total_bytes / absl::ToDoubleSeconds(elapsed);
|
||||||
|
std::cout << "Chunk size (min/avg/max): " << HumanBytes(cfg.min_chunk_size)
|
||||||
|
<< " / " << HumanBytes(cfg.avg_chunk_size) << " / "
|
||||||
|
<< HumanBytes(cfg.max_chunk_size)
|
||||||
|
<< " | Hash: " << HashTypeToString(cfg.hash_type)
|
||||||
|
<< " | Threads: " << cfg.num_threads << std::endl;
|
||||||
|
std::cout << "gear_table: " << GearTable() << " | mask_s: 0x" << std::hex
|
||||||
|
<< cfg.mask_s << " | mask_l: 0x" << cfg.mask_l << std::dec
|
||||||
|
<< std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Duration:" << std::setw(num_w)
|
||||||
|
<< HumanDuration(elapsed) << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Total files:" << std::setw(num_w)
|
||||||
|
<< stats.total_files << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Total chunks:" << std::setw(num_w)
|
||||||
|
<< stats.total_chunks << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Unique chunks:" << std::setw(num_w)
|
||||||
|
<< stats.unique_chunks << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Total data:" << std::setw(num_w)
|
||||||
|
<< HumanBytes(stats.total_bytes, 2) << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Unique data:" << std::setw(num_w)
|
||||||
|
<< HumanBytes(stats.unique_bytes, 2) << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Throughput:" << std::setw(num_w - 2)
|
||||||
|
<< HumanBytes(bps, 2) << "/s" << std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Avg. chunk size:" << std::setw(num_w)
|
||||||
|
<< HumanBytes(static_cast<double>(stats.unique_bytes) /
|
||||||
|
stats.unique_chunks)
|
||||||
|
<< std::endl;
|
||||||
|
std::cout << std::setw(title_w) << "Deduplication:" << std::setw(num_w - 1)
|
||||||
|
<< std::setprecision(4) << dedup_pct << "%" << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ShowChunkSize(size_t size, uint64_t cnt, uint64_t max_count,
|
||||||
|
uint64_t total_count) {
|
||||||
|
const int key_w = 7;
|
||||||
|
const int hbar_w = 40;
|
||||||
|
const int num_w = 10;
|
||||||
|
const int pct_w = 2;
|
||||||
|
|
||||||
|
double pct = 100.0 * static_cast<double>(cnt) / total_count;
|
||||||
|
double hscale = static_cast<double>(cnt) / max_count;
|
||||||
|
int blocks = round(hscale * hbar_w);
|
||||||
|
|
||||||
|
std::cout << std::setw(key_w) << HumanBytes(size) << " ";
|
||||||
|
for (int i = 0; i < blocks; i++) std::cout << "#";
|
||||||
|
for (int i = hbar_w - blocks; i > 0; i--) std::cout << " ";
|
||||||
|
std::cout << " " << std::setw(num_w) << cnt << " (" << std::setw(pct_w)
|
||||||
|
<< round(pct) << "%)" << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<size_t> ChunkSizeBuckets(const IndexerConfig& cfg,
|
||||||
|
const Indexer::ChunkSizeMap& sizes,
|
||||||
|
size_t fixed_min_size,
|
||||||
|
size_t fixed_max_size,
|
||||||
|
uint64_t* max_count_out,
|
||||||
|
uint64_t* total_count_out) {
|
||||||
|
size_t min_size = 1u << 31;
|
||||||
|
size_t max_size = 0;
|
||||||
|
uint64_t max_count = 0;
|
||||||
|
uint64_t total_count = 0, found_count = 0;
|
||||||
|
uint64_t outside_min_max_count = 0;
|
||||||
|
std::vector<size_t> buckets;
|
||||||
|
// Find out min/max chunk sizes
|
||||||
|
for (auto [chunk_size, count] : sizes) {
|
||||||
|
if (chunk_size < min_size) min_size = chunk_size;
|
||||||
|
if (chunk_size > max_size) max_size = chunk_size;
|
||||||
|
if (count > max_count) max_count = count;
|
||||||
|
if (chunk_size < fixed_min_size) outside_min_max_count += count;
|
||||||
|
if (fixed_max_size > 0 && chunk_size > fixed_max_size)
|
||||||
|
outside_min_max_count += count;
|
||||||
|
total_count += count;
|
||||||
|
}
|
||||||
|
if (fixed_min_size > 0) min_size = fixed_min_size;
|
||||||
|
// Use steps of powers of two until min. chunk size is reached.
|
||||||
|
uint64_t size;
|
||||||
|
uint64_t pow_end_size = std::min(cfg.min_chunk_size, max_size);
|
||||||
|
for (size = min_size; size < pow_end_size; size <<= 1) {
|
||||||
|
buckets.push_back(size);
|
||||||
|
auto it = sizes.find(size);
|
||||||
|
if (it != sizes.end()) found_count += it->second;
|
||||||
|
}
|
||||||
|
if (fixed_max_size > max_size) max_size = fixed_max_size;
|
||||||
|
// Use step increments of max_chunk_size_step afterwards.
|
||||||
|
for (; size <= max_size; size += cfg.max_chunk_size_step) {
|
||||||
|
buckets.push_back(size);
|
||||||
|
auto it = sizes.find(size);
|
||||||
|
if (it != sizes.end()) found_count += it->second;
|
||||||
|
}
|
||||||
|
// Make sure we found every bucket.
|
||||||
|
assert(total_count == found_count + outside_min_max_count);
|
||||||
|
if (max_count_out) *max_count_out = max_count;
|
||||||
|
if (total_count_out) *total_count_out = total_count;
|
||||||
|
return buckets;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ShowChunkSizes(const IndexerConfig& cfg,
|
||||||
|
const Indexer::ChunkSizeMap& sizes) {
|
||||||
|
uint64_t max_count = 0;
|
||||||
|
uint64_t total_count = 0;
|
||||||
|
auto buckets = ChunkSizeBuckets(cfg, sizes, 0, 0, &max_count, &total_count);
|
||||||
|
for (auto size : buckets) {
|
||||||
|
auto it = sizes.find(size);
|
||||||
|
uint64_t cnt = it != sizes.end() ? it->second : 0;
|
||||||
|
ShowChunkSize(size, cnt, max_count, total_count);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status WriteResultsFile(const std::string& filepath,
|
||||||
|
const std::string& description,
|
||||||
|
const IndexerConfig& cfg,
|
||||||
|
const Indexer::OpStats& stats,
|
||||||
|
const Indexer::ChunkSizeMap& sizes) {
|
||||||
|
bool exists = path::FileExists(filepath);
|
||||||
|
std::FILE* fout = std::fopen(filepath.c_str(), "a");
|
||||||
|
if (!fout) {
|
||||||
|
return ErrnoToCanonicalStatus(
|
||||||
|
errno, absl::StrFormat("Couldn't write to file '%s'", filepath));
|
||||||
|
}
|
||||||
|
|
||||||
|
path::FileCloser closer(fout);
|
||||||
|
|
||||||
|
static constexpr int num_columns = 15;
|
||||||
|
static const char* columns[num_columns] = {
|
||||||
|
"gear_table",
|
||||||
|
"mask_s",
|
||||||
|
"mask_l",
|
||||||
|
"Min chunk size [KiB]",
|
||||||
|
"Avg chunk size [KiB]",
|
||||||
|
"Max chunk size [KiB]",
|
||||||
|
"Read speed [MiB/s]",
|
||||||
|
"Files",
|
||||||
|
"Total chunks",
|
||||||
|
"Unique chunks",
|
||||||
|
"Total size [MiB]",
|
||||||
|
"Unique size [MiB]",
|
||||||
|
"Dedup size [MiB]",
|
||||||
|
"Dedup ratio",
|
||||||
|
"Res avg chunk size [KiB]",
|
||||||
|
};
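// Resulting CSV layout (illustrative): the optional description columns come
// first, then the fixed columns above, then one column per chunk-size bucket,
// e.g. "Description,gear_table,mask_s,...,Res avg chunk size [KiB],128 KB,...".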
|
||||||
|
|
||||||
|
auto buckets = ChunkSizeBuckets(cfg, sizes, cfg.min_chunk_size,
|
||||||
|
cfg.max_chunk_size, nullptr, nullptr);
|
||||||
|
// Write column headers if this is a new file.
|
||||||
|
if (!exists) {
|
||||||
|
// Write header cells for the description columns ("Description" followed by empty cells).
|
||||||
|
int desc_cols = description.empty() ? 0 : 1;
|
||||||
|
desc_cols += std::count(description.begin(), description.end(), ',');
|
||||||
|
for (int i = 0; i < desc_cols; i++) {
|
||||||
|
std::fprintf(fout, i == 0 ? "Description," : ",");
|
||||||
|
}
|
||||||
|
// Write fixed column headers.
|
||||||
|
for (int i = 0; i < num_columns; i++) {
|
||||||
|
std::fprintf(fout, "%s,", columns[i]);
|
||||||
|
}
|
||||||
|
// Write chunk distribution column headers
|
||||||
|
for (auto size : buckets) {
|
||||||
|
std::fprintf(fout, "%s,", HumanBytes(size).c_str());
|
||||||
|
}
|
||||||
|
std::fprintf(fout, "\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count all chunks below min_chunk_size and above max_chunk_size as they
|
||||||
|
// won't be included in the buckets list automatically.
|
||||||
|
uint64_t below_min_cnt = 0, above_max_cnt = 0;
|
||||||
|
for (auto [chunk_size, count] : sizes) {
|
||||||
|
if (chunk_size < cfg.min_chunk_size) below_min_cnt += count;
|
||||||
|
if (chunk_size > cfg.max_chunk_size) above_max_cnt += count;
|
||||||
|
}
|
||||||
|
|
||||||
|
static constexpr double mib = static_cast<double>(1 << 20);
|
||||||
|
|
||||||
|
// Write user-supplied description
|
||||||
|
if (!description.empty()) std::fprintf(fout, "%s,", description.c_str());
|
||||||
|
// Write chunking params.
|
||||||
|
std::fprintf(fout, "%s,0x%zx,0x%zx,", GearTable(), cfg.mask_s, cfg.mask_l);
|
||||||
|
std::fprintf(fout, "%zu,%zu,%zu,", cfg.min_chunk_size >> 10,
|
||||||
|
cfg.avg_chunk_size >> 10, cfg.max_chunk_size >> 10);
|
||||||
|
// Write speed, files, chunks.
|
||||||
|
double mibps =
|
||||||
|
(stats.total_bytes / mib) / absl::ToDoubleSeconds(stats.elapsed);
|
||||||
|
std::fprintf(fout, "%f,%zu,%zu,%zu,", mibps, stats.total_files,
|
||||||
|
stats.total_chunks, stats.unique_chunks);
|
||||||
|
// Write total and unique sizes.
|
||||||
|
std::fprintf(fout, "%f,%f,%f,", stats.total_bytes / mib,
|
||||||
|
stats.unique_bytes / mib,
|
||||||
|
(stats.total_bytes - stats.unique_bytes) / mib);
|
||||||
|
// Write dedup ratio and avg. chunk size.
|
||||||
|
double dedup_ratio = (stats.total_bytes - stats.unique_bytes) /
|
||||||
|
static_cast<double>(stats.total_bytes);
|
||||||
|
size_t avg_size = stats.unique_bytes / stats.unique_chunks;
|
||||||
|
std::fprintf(fout, "%f,%zu,", dedup_ratio, avg_size >> 10);
|
||||||
|
// Write chunk distribution
|
||||||
|
size_t index = 0;
|
||||||
|
for (auto size : buckets) {
|
||||||
|
auto it = sizes.find(size);
|
||||||
|
uint64_t cnt = it != sizes.end() ? it->second : 0;
|
||||||
|
if (index == 0) {
|
||||||
|
cnt += below_min_cnt;
|
||||||
|
} else if (index + 1 == buckets.size()) {
|
||||||
|
cnt += above_max_cnt;
|
||||||
|
}
|
||||||
|
++index;
|
||||||
|
std::fprintf(fout, "%f,", static_cast<double>(cnt) / stats.unique_chunks);
|
||||||
|
}
|
||||||
|
std::fprintf(fout, "\n");
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
IndexerConfig::HashType GetHashType(const std::string& name) {
|
||||||
|
if (name == "null") return IndexerConfig::HashType::kNull;
|
||||||
|
if (name == "blake3") return IndexerConfig::HashType::kBlake3;
|
||||||
|
std::cerr << "Unknown hash type: \"" << name << "\"" << std::endl;
|
||||||
|
return IndexerConfig::HashType::kUndefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
cdc_ft::SetupFlagsHelp();
|
||||||
|
absl::ParseCommandLine(argc, argv);
|
||||||
|
|
||||||
|
std::vector<std::string> inputs = absl::GetFlag(FLAGS_inputs);
|
||||||
|
|
||||||
|
if (inputs.empty()) {
|
||||||
|
std::cout << "Execute the following command to get help on the usage:"
|
||||||
|
<< std::endl
|
||||||
|
<< argv[0] << " --help" << std::endl;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
cdc_ft::IndexerConfig cfg;
|
||||||
|
cfg.num_threads = absl::GetFlag(FLAGS_num_threads);
|
||||||
|
cfg.min_chunk_size = absl::GetFlag(FLAGS_min_chunk_size).Size();
|
||||||
|
cfg.avg_chunk_size = absl::GetFlag(FLAGS_avg_chunk_size).Size();
|
||||||
|
cfg.max_chunk_size = absl::GetFlag(FLAGS_max_chunk_size).Size();
|
||||||
|
cfg.read_block_size = absl::GetFlag(FLAGS_read_block_size).Size();
|
||||||
|
cfg.hash_type = cdc_ft::GetHashType(absl::GetFlag(FLAGS_hash));
|
||||||
|
|
||||||
|
if (!cfg.min_chunk_size) cfg.min_chunk_size = cfg.avg_chunk_size >> 1;
|
||||||
|
if (!cfg.max_chunk_size) cfg.max_chunk_size = cfg.avg_chunk_size << 1;
|
||||||
|
if (!cfg.read_block_size) cfg.read_block_size = cfg.max_chunk_size;
|
||||||
|
cfg.max_chunk_size_step = std::max<size_t>(cfg.min_chunk_size >> 2, 1024u);
|
||||||
|
assert(cfg.avg_chunk_size > 0);
|
||||||
|
assert(cfg.avg_chunk_size > cfg.min_chunk_size);
|
||||||
|
assert(cfg.avg_chunk_size < cfg.max_chunk_size);
|
||||||
|
assert(cfg.hash_type != cdc_ft::IndexerConfig::HashType::kUndefined);
|
||||||
|
|
||||||
|
cdc_ft::Indexer idx;
|
||||||
|
std::cout << "Starting indexer on " << inputs.size() << " inputs."
|
||||||
|
<< std::endl;
|
||||||
|
static absl::Time start = absl::Now();
|
||||||
|
absl::Status res = idx.Run(cfg, inputs, cdc_ft::ShowProgress);
|
||||||
|
auto elapsed = absl::Now() - start;
|
||||||
|
std::cout << std::endl;
|
||||||
|
if (res.ok()) {
|
||||||
|
std::cout << "Operation succeeded." << std::endl << std::endl;
|
||||||
|
cdc_ft::ShowSummary(idx.Config(), idx.Stats(), elapsed);
|
||||||
|
std::cout << std::endl;
|
||||||
|
cdc_ft::ShowChunkSizes(idx.Config(), idx.ChunkSizes());
|
||||||
|
std::string results_file = absl::GetFlag(FLAGS_results_file);
|
||||||
|
if (!results_file.empty()) {
|
||||||
|
res = cdc_ft::WriteResultsFile(
|
||||||
|
results_file, absl::GetFlag(FLAGS_description), idx.Config(),
|
||||||
|
idx.Stats(), idx.ChunkSizes());
|
||||||
|
if (!res.ok())
|
||||||
|
std::cerr << "Failed to write results to '" << results_file
|
||||||
|
<< "': " << res.message() << std::endl;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
std::cerr << "Error: (" << res.code() << ") " << res.message() << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
return static_cast<int>(res.code());
|
||||||
|
}
|
||||||
4
cdc_rsync/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
x64/*
|
||||||
|
generated_protos
|
||||||
|
*.log
|
||||||
|
*.user
|
||||||
191
cdc_rsync/BUILD
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
load(
|
||||||
|
"//tools:windows_cc_library.bzl",
|
||||||
|
"cc_windows_shared_library",
|
||||||
|
)
|
||||||
|
|
||||||
|
package(default_visibility = [
|
||||||
|
"//:__subpackages__",
|
||||||
|
])
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "client_file_info",
|
||||||
|
hdrs = ["client_file_info.h"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "client_socket",
|
||||||
|
srcs = ["client_socket.cc"],
|
||||||
|
hdrs = ["client_socket.h"],
|
||||||
|
target_compatible_with = ["@platforms//os:windows"],
|
||||||
|
deps = [
|
||||||
|
"//cdc_rsync/base:socket",
|
||||||
|
"//common:log",
|
||||||
|
"//common:status",
|
||||||
|
"//common:util",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "file_finder_and_sender",
|
||||||
|
srcs = ["file_finder_and_sender.cc"],
|
||||||
|
hdrs = ["file_finder_and_sender.h"],
|
||||||
|
target_compatible_with = ["@platforms//os:windows"],
|
||||||
|
deps = [
|
||||||
|
":client_file_info",
|
||||||
|
"//cdc_rsync/base:message_pump",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:path_filter",
|
||||||
|
"//common:platform",
|
||||||
|
"//common:util",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "file_finder_and_sender_test",
|
||||||
|
srcs = ["file_finder_and_sender_test.cc"],
|
||||||
|
data = ["testdata/root.txt"] + glob(["testdata/file_finder_and_sender/**"]),
|
||||||
|
deps = [
|
||||||
|
":file_finder_and_sender",
|
||||||
|
"//cdc_rsync/base:fake_socket",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//common:test_main",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
"@com_google_protobuf//:protobuf_lite",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_windows_shared_library(
|
||||||
|
name = "cdc_rsync",
|
||||||
|
srcs = [
|
||||||
|
"cdc_rsync.cc",
|
||||||
|
"cdc_rsync_client.cc",
|
||||||
|
"dllmain.cc",
|
||||||
|
],
|
||||||
|
hdrs = [
|
||||||
|
"cdc_rsync.h",
|
||||||
|
"cdc_rsync_client.h",
|
||||||
|
"error_messages.h",
|
||||||
|
],
|
||||||
|
linkopts = select({
|
||||||
|
"//tools:windows": [
|
||||||
|
"/DEFAULTLIB:Ws2_32.lib", # Sockets, e.g. recv, send, WSA*.
|
||||||
|
],
|
||||||
|
"//conditions:default": [],
|
||||||
|
}),
|
||||||
|
local_defines = ["COMPILING_DLL"],
|
||||||
|
target_compatible_with = ["@platforms//os:windows"],
|
||||||
|
deps = [
|
||||||
|
":client_socket",
|
||||||
|
":file_finder_and_sender",
|
||||||
|
":parallel_file_opener",
|
||||||
|
":progress_tracker",
|
||||||
|
":zstd_stream",
|
||||||
|
"//cdc_rsync/base:cdc_interface",
|
||||||
|
"//cdc_rsync/base:message_pump",
|
||||||
|
"//cdc_rsync/base:server_exit_code",
|
||||||
|
"//cdc_rsync/base:socket",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:gamelet_component",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:path_filter",
|
||||||
|
"//common:platform",
|
||||||
|
"//common:port_manager",
|
||||||
|
"//common:process",
|
||||||
|
"//common:remote_util",
|
||||||
|
"//common:sdk_util",
|
||||||
|
"//common:status",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:threadpool",
|
||||||
|
"//common:util",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "parallel_file_opener",
|
||||||
|
srcs = ["parallel_file_opener.cc"],
|
||||||
|
hdrs = ["parallel_file_opener.h"],
|
||||||
|
data = ["testdata/root.txt"] + glob(["testdata/parallel_file_opener/**"]),
|
||||||
|
deps = [
|
||||||
|
":client_file_info",
|
||||||
|
"//common:path",
|
||||||
|
"//common:platform",
|
||||||
|
"//common:threadpool",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "parallel_file_opener_test",
|
||||||
|
srcs = ["parallel_file_opener_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":parallel_file_opener",
|
||||||
|
"//common:test_main",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "progress_tracker",
|
||||||
|
srcs = ["progress_tracker.cc"],
|
||||||
|
hdrs = ["progress_tracker.h"],
|
||||||
|
deps = [
|
||||||
|
":file_finder_and_sender",
|
||||||
|
"//cdc_rsync/base:cdc_interface",
|
||||||
|
"//common:stopwatch",
|
||||||
|
"@com_github_jsoncpp//:jsoncpp",
|
||||||
|
"@com_google_absl//absl/strings:str_format",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "progress_tracker_test",
|
||||||
|
srcs = ["progress_tracker_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":progress_tracker",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:test_main",
|
||||||
|
"//common:testing_clock",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "zstd_stream",
|
||||||
|
srcs = ["zstd_stream.cc"],
|
||||||
|
hdrs = ["zstd_stream.h"],
|
||||||
|
deps = [
|
||||||
|
":client_socket",
|
||||||
|
"//common:buffer",
|
||||||
|
"//common:status",
|
||||||
|
"//common:status_macros",
|
||||||
|
"//common:stopwatch",
|
||||||
|
"@com_github_zstd//:zstd",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "zstd_stream_test",
|
||||||
|
srcs = ["zstd_stream_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":zstd_stream",
|
||||||
|
"//cdc_rsync/base:fake_socket",
|
||||||
|
"//cdc_rsync_server:unzstd_stream",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//common:test_main",
|
||||||
|
"@com_github_zstd//:zstd",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_sources",
|
||||||
|
srcs = glob(["*_test.cc"]),
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_data",
|
||||||
|
srcs = glob(["testdata/**"]),
|
||||||
|
)
|
||||||
5
cdc_rsync/README.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# CDC RSync
|
||||||
|
|
||||||
|
CDC RSync is a command line tool / library for uploading files to a remote machine in an rsync-like
|
||||||
|
fashion. It quickly skips files with matching timestamp and size, and only transfers deltas for
|
||||||
|
existing files.
|
||||||
92
cdc_rsync/base/BUILD
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package(default_visibility = [
|
||||||
|
"//:__subpackages__",
|
||||||
|
])
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "cdc_interface",
|
||||||
|
srcs = ["cdc_interface.cc"],
|
||||||
|
hdrs = ["cdc_interface.h"],
|
||||||
|
deps = [
|
||||||
|
":message_pump",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:buffer",
|
||||||
|
"//common:log",
|
||||||
|
"//common:path",
|
||||||
|
"//common:status",
|
||||||
|
"//common:threadpool",
|
||||||
|
"//fastcdc",
|
||||||
|
"@com_github_blake3//:blake3",
|
||||||
|
"@com_google_absl//absl/strings:str_format",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "cdc_interface_test",
|
||||||
|
srcs = ["cdc_interface_test.cc"],
|
||||||
|
data = ["testdata/root.txt"] + glob(["testdata/cdc_interface/**"]),
|
||||||
|
deps = [
|
||||||
|
":cdc_interface",
|
||||||
|
":fake_socket",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//common:test_main",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "fake_socket",
|
||||||
|
srcs = ["fake_socket.cc"],
|
||||||
|
hdrs = ["fake_socket.h"],
|
||||||
|
deps = [
|
||||||
|
"//cdc_rsync/base:socket",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "message_pump",
|
||||||
|
srcs = ["message_pump.cc"],
|
||||||
|
hdrs = ["message_pump.h"],
|
||||||
|
deps = [
|
||||||
|
":socket",
|
||||||
|
"//common:buffer",
|
||||||
|
"//common:log",
|
||||||
|
"//common:status",
|
||||||
|
"@com_google_absl//absl/status",
|
||||||
|
"@com_google_absl//absl/strings:str_format",
|
||||||
|
"@com_google_protobuf//:protobuf_lite",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_test(
|
||||||
|
name = "message_pump_test",
|
||||||
|
srcs = ["message_pump_test.cc"],
|
||||||
|
deps = [
|
||||||
|
":fake_socket",
|
||||||
|
":message_pump",
|
||||||
|
"//cdc_rsync/protos:messages_cc_proto",
|
||||||
|
"//common:status_test_macros",
|
||||||
|
"//common:test_main",
|
||||||
|
"@com_google_googletest//:gtest",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "server_exit_code",
|
||||||
|
hdrs = ["server_exit_code.h"],
|
||||||
|
)
|
||||||
|
|
||||||
|
cc_library(
|
||||||
|
name = "socket",
|
||||||
|
hdrs = ["socket.h"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_sources",
|
||||||
|
srcs = glob(["*_test.cc"]),
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all_test_data",
|
||||||
|
srcs = glob(["testdata/**"]),
|
||||||
|
)
|
||||||
670
cdc_rsync/base/cdc_interface.cc
Normal file
@@ -0,0 +1,670 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/cdc_interface.h"
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "blake3.h"
|
||||||
|
#include "cdc_rsync/base/message_pump.h"
|
||||||
|
#include "cdc_rsync/protos/messages.pb.h"
|
||||||
|
#include "common/buffer.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/util.h"
|
||||||
|
#include "fastcdc/fastcdc.h"
|
||||||
|
|
||||||
|
#if PLATFORM_LINUX
|
||||||
|
#include <fcntl.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// The average chunk size should be as low as possible, but not too low.
|
||||||
|
// Lower sizes mean better delta-encoding and hence less data uploads.
|
||||||
|
// However, chunking becomes slower for lower sizes. At 8 KB, a gamelet can
|
||||||
|
// still process close to 700 MB/sec, which matches hard drive speed.
|
||||||
|
// Signature data rate is another factor. The gamelet generates signature data
|
||||||
|
// at a rate of 700 MB/sec / kAvgChunkSize * sizeof(Chunk) = 1.7 MB/sec for 8 KB
|
||||||
|
// chunks. That means the client needs at least 16 MBit download bandwidth to
|
||||||
|
// stream signatures or else this part becomes slower. 4 KB chunks would require
|
||||||
|
// a 32 MBit connection.
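// Worked out (illustrative, with sizeof(Chunk) = 20 bytes as defined below):
// 700 MB/s / 8 KB * 20 B ~= 1.7 MB/s ~= 14 Mbit/s of signature data, which the
// 16 MBit figure above covers with some headroom.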
|
||||||
|
constexpr size_t kAvgChunkSize = 8 * 1024;
|
||||||
|
constexpr size_t kMinChunkSize = kAvgChunkSize / 2;
|
||||||
|
constexpr size_t kMaxChunkSize = kAvgChunkSize * 4;
|
||||||
|
|
||||||
|
// This number was found by experimentally optimizing chunking throughput.
|
||||||
|
constexpr size_t kFileIoBufferSize = kMaxChunkSize * 4;
|
||||||
|
|
||||||
|
// Limits the size of contiguous patch chunks where data is copied from the
|
||||||
|
// basis file. Necessary since the server copies chunks in one go and doesn't
|
||||||
|
// split them up (would be possible, but unnecessarily complicates code).
|
||||||
|
constexpr size_t kCombinedChunkSizeThreshold = 64 * 1024;
|
||||||
|
|
||||||
|
// Number of hashing tasks in flight at a given point of time.
|
||||||
|
constexpr size_t kMaxNumHashTasks = 64;
|
||||||
|
|
||||||
|
#pragma pack(push, 1)
|
||||||
|
// 16-byte hashes guarantee a sufficiently low chance of hash collisions. For
// 8-byte hashes, the chance of a collision is actually quite high for large
// files: 0.0004% for a 100 GB file and 8 KB chunks.
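// Rough birthday-bound sketch (illustrative): a 100 GB file at 8 KB per chunk
// yields ~1.3e7 chunks; with 64-bit hashes the collision probability is about
// n^2 / 2^65 ~= 5e-6, i.e. the 0.0004% order of magnitude quoted above.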
|
||||||
|
struct Hash {
|
||||||
|
uint64_t low;
|
||||||
|
uint64_t high;
|
||||||
|
|
||||||
|
bool operator==(const Hash& other) const {
|
||||||
|
return low == other.low && high == other.high;
|
||||||
|
}
|
||||||
|
bool operator!=(const Hash& other) const { return !(*this == other); }
|
||||||
|
};
|
||||||
|
#pragma pack(pop)
|
||||||
|
|
||||||
|
static_assert(sizeof(Hash) <= BLAKE3_OUT_LEN, "");
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
namespace std {
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct hash<cdc_ft::Hash> {
|
||||||
|
size_t operator()(const cdc_ft::Hash& hash) const { return hash.low; }
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace std
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Send a batch of signatures every 8 MB of processed data (~90 packets per
|
||||||
|
// second at 700 MB/sec processing rate). The size of each signature batch is
|
||||||
|
// kMinNumChunksPerBatch * sizeof(Chunk), e.g. 20 KB for an avg chunk size of
|
||||||
|
// 8 KB.
|
||||||
|
constexpr int kMinSigBatchDataSize = 8 * 1024 * 1024;
|
||||||
|
constexpr int kMinNumChunksPerBatch = kMinSigBatchDataSize / kAvgChunkSize;
|
||||||
|
|
||||||
|
// Send patch commands in batches of at least that size for efficiency.
|
||||||
|
constexpr int kPatchRequestSizeThreshold = 65536;
|
||||||
|
|
||||||
|
// 16 bytes hash, 4 bytes size = 20 bytes.
|
||||||
|
struct Chunk {
|
||||||
|
Hash hash;
|
||||||
|
uint32_t size = 0;
|
||||||
|
Chunk(const Hash& hash, uint32_t size) : hash(hash), size(size) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
Hash ComputeHash(const void* data, size_t size) {
|
||||||
|
assert(data);
|
||||||
|
Hash hash;
|
||||||
|
blake3_hasher hasher;
|
||||||
|
blake3_hasher_init(&hasher);
|
||||||
|
blake3_hasher_update(&hasher, data, size);
|
||||||
|
blake3_hasher_finalize(&hasher, reinterpret_cast<uint8_t*>(&hash),
|
||||||
|
sizeof(hash));
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Task that computes hashes for a single chunk and adds the result to
|
||||||
|
// AddSignaturesResponse.
|
||||||
|
class HashTask : public Task {
|
||||||
|
public:
|
||||||
|
HashTask() {}
|
||||||
|
~HashTask() {}
|
||||||
|
|
||||||
|
HashTask(const HashTask& other) = delete;
|
||||||
|
HashTask& operator=(HashTask&) = delete;
|
||||||
|
|
||||||
|
// Sets the data to compute the hash of.
|
||||||
|
// Should be called before queuing the task.
|
||||||
|
void SetData(const void* data, size_t size) {
|
||||||
|
buffer_.reserve(size);
|
||||||
|
buffer_.resize(size);
|
||||||
|
memcpy(buffer_.data(), data, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Appends the computed hash to |response|.
|
||||||
|
// Should be called once the task is finished.
|
||||||
|
void AppendHash(AddSignaturesResponse* response) const {
|
||||||
|
response->add_sizes(static_cast<uint32_t>(buffer_.size()));
|
||||||
|
std::string* hashes = response->mutable_hashes();
|
||||||
|
hashes->append(reinterpret_cast<const char*>(&hash_), sizeof(hash_));
|
||||||
|
}
|
||||||
|
|
||||||
|
void ThreadRun(IsCancelledPredicate is_cancelled) override {
|
||||||
|
hash_ = ComputeHash(buffer_.data(), buffer_.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
Buffer buffer_;
|
||||||
|
struct Hash hash_ = {0};
|
||||||
|
};
|
||||||
|
|
||||||
|
class ServerChunkReceiver {
|
||||||
|
public:
|
||||||
|
explicit ServerChunkReceiver(MessagePump* message_pump)
|
||||||
|
: message_pump_(message_pump) {
|
||||||
|
assert(message_pump_);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receives server signature packets and places the data into a map
|
||||||
|
// (chunk hash) -> (server-side file offset).
|
||||||
|
// If |block| is false, returns immediately if no data is available.
|
||||||
|
// If |block| is true, blocks until some data is available.
|
||||||
|
// |num_server_bytes_processed| is set to the total size of the chunks
|
||||||
|
// received.
|
||||||
|
absl::Status Receive(bool block, uint64_t* num_server_bytes_processed) {
|
||||||
|
assert(num_server_bytes_processed);
|
||||||
|
*num_server_bytes_processed = 0;
|
||||||
|
|
||||||
|
// Already all server chunks received?
|
||||||
|
if (all_chunks_received_) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no data is available, early out (unless blocking is requested).
|
||||||
|
if (!block && !message_pump_->CanReceive()) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive signatures.
|
||||||
|
AddSignaturesResponse response;
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_->ReceiveMessage(PacketType::kAddSignatures, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive AddSignaturesResponse");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate size of packed hashes, just in case.
|
||||||
|
const int num_chunks = response.sizes_size();
|
||||||
|
if (response.hashes().size() != num_chunks * sizeof(Hash)) {
|
||||||
|
return MakeStatus("Bad hashes size. Expected %u. Actual %u.",
|
||||||
|
num_chunks * sizeof(Hash), response.hashes().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
// An empty packet marks the end of the server chunks.
|
||||||
|
if (num_chunks == 0) {
|
||||||
|
all_chunks_received_ = true;
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy the data over to |server_chunk_offsets|.
|
||||||
|
const Hash* hashes =
|
||||||
|
reinterpret_cast<const Hash*>(response.hashes().data());
|
||||||
|
for (int n = 0; n < num_chunks; ++n) {
|
||||||
|
uint32_t size = response.sizes(n);
|
||||||
|
chunk_offsets_.insert({hashes[n], curr_offset_});
|
||||||
|
curr_offset_ += size;
|
||||||
|
*num_server_bytes_processed += size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// True if all server chunks have been received.
|
||||||
|
bool AllChunksReceived() const { return all_chunks_received_; }
|
||||||
|
|
||||||
|
// Returns a map (server chunk hash) -> (offset of that chunk in server file).
|
||||||
|
const std::unordered_map<Hash, uint64_t>& ChunkOffsets() const {
|
||||||
|
return chunk_offsets_;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
MessagePump* message_pump_;
|
||||||
|
|
||||||
|
// Maps server chunk hashes to the file offset in the server file.
|
||||||
|
std::unordered_map<Hash, uint64_t> chunk_offsets_;
|
||||||
|
|
||||||
|
// Current server file offset.
|
||||||
|
uint64_t curr_offset_ = 0;
|
||||||
|
|
||||||
|
// Whether all server chunks have been received.
|
||||||
|
bool all_chunks_received_ = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
class PatchSender {
|
||||||
|
// 1 byte for source, 8 bytes for offset and 4 bytes for size.
|
||||||
|
static constexpr size_t kPatchMetadataSize =
|
||||||
|
sizeof(uint8_t) + sizeof(uint64_t) + sizeof(uint32_t);
|
||||||
|
|
||||||
|
public:
|
||||||
|
PatchSender(FILE* file, MessagePump* message_pump)
|
||||||
|
: file_(file), message_pump_(message_pump) {}
|
||||||
|
|
||||||
|
// Tries to send patch data for the next chunk in |client_chunks|. The class
|
||||||
|
// keeps an internal counter for the current chunk index. Patch data is not
|
||||||
|
// sent if the current client chunk is not found among the server chunks and
|
||||||
|
// there are outstanding server chunks. In that case, the method returns
|
||||||
|
// with an OK status and should be called later as soon as additional server
|
||||||
|
// chunks have been received.
|
||||||
|
// |num_client_bytes_processed| is set to the total size of the chunks added.
|
||||||
|
absl::Status TryAddChunks(const std::vector<Chunk>& client_chunks,
|
||||||
|
const ServerChunkReceiver& server_chunk_receiver,
|
||||||
|
uint64_t* num_client_bytes_processed) {
|
||||||
|
assert(num_client_bytes_processed);
|
||||||
|
*num_client_bytes_processed = 0;
|
||||||
|
|
||||||
|
while (curr_chunk_idx_ < client_chunks.size()) {
|
||||||
|
const Chunk& chunk = client_chunks[curr_chunk_idx_];
|
||||||
|
auto it = server_chunk_receiver.ChunkOffsets().find(chunk.hash);
|
||||||
|
bool exists = it != server_chunk_receiver.ChunkOffsets().end();
|
||||||
|
|
||||||
|
// If there are outstanding server chunks and the client hash is not
|
||||||
|
// found, do not send the patch data yet. A future server chunk might
|
||||||
|
// contain the data.
|
||||||
|
if (!exists && !server_chunk_receiver.AllChunksReceived()) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status status = exists ? AddExistingChunk(it->second, chunk.size)
|
||||||
|
: AddNewChunk(chunk.size);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to add chunk");
|
||||||
|
}
|
||||||
|
|
||||||
|
++curr_chunk_idx_;
|
||||||
|
*num_client_bytes_processed += chunk.size;
|
||||||
|
|
||||||
|
// Break the loop once all server chunks have been received; otherwise,
|
||||||
|
// progress reporting would be blocked.
|
||||||
|
if (server_chunk_receiver.AllChunksReceived()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sends the remaining patch commands and an EOF marker.
|
||||||
|
absl::Status Flush() {
|
||||||
|
if (request_size_ > 0) {
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_->SendMessage(PacketType::kAddPatchCommands, request_);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send final patch commands");
|
||||||
|
}
|
||||||
|
total_request_size_ += request_size_;
|
||||||
|
request_.Clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send an empty patch commands request as EOF marker.
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_->SendMessage(PacketType::kAddPatchCommands, request_);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send patch commands EOF marker");
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the (estimated) total size of all patch data sent.
|
||||||
|
uint64_t GetTotalRequestSize() const { return total_request_size_; }
|
||||||
|
|
||||||
|
// Index of the next client chunk.
|
||||||
|
size_t CurrChunkIdx() const { return curr_chunk_idx_; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Adds patch data for a client chunk that has a matching server chunk of
|
||||||
|
// given |size| at given |offset| in the server file.
|
||||||
|
absl::Status AddExistingChunk(uint64_t offset, uint32_t size) {
|
||||||
|
int last_idx = request_.sources_size() - 1;
|
||||||
|
if (last_idx >= 0 &&
|
||||||
|
request_.sources(last_idx) ==
|
||||||
|
AddPatchCommandsRequest::SOURCE_BASIS_FILE &&
|
||||||
|
request_.offsets(last_idx) + request_.sizes(last_idx) == offset &&
|
||||||
|
request_.sizes(last_idx) < kCombinedChunkSizeThreshold) {
|
||||||
|
// Same source and contiguous data -> Append to last entry.
|
||||||
|
request_.set_sizes(last_idx, request_.sizes(last_idx) + size);
|
||||||
|
} else {
|
||||||
|
// Different source or first chunk -> Create new entry.
|
||||||
|
request_.add_sources(AddPatchCommandsRequest::SOURCE_BASIS_FILE);
|
||||||
|
request_.add_offsets(offset);
|
||||||
|
request_.add_sizes(size);
|
||||||
|
request_size_ += kPatchMetadataSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
return OnChunkAdded(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status AddNewChunk(uint32_t size) {
|
||||||
|
std::string* data = request_.mutable_data();
|
||||||
|
int last_idx = request_.sources_size() - 1;
|
||||||
|
if (last_idx >= 0 &&
|
||||||
|
request_.sources(last_idx) == AddPatchCommandsRequest::SOURCE_DATA) {
|
||||||
|
// Same source -> Append to last entry.
|
||||||
|
request_.set_sizes(last_idx, request_.sizes(last_idx) + size);
|
||||||
|
} else {
|
||||||
|
// Different source or first chunk -> Create new entry.
|
||||||
|
request_.add_sources(AddPatchCommandsRequest::SOURCE_DATA);
|
||||||
|
request_.add_offsets(data->size());
|
||||||
|
request_.add_sizes(size);
|
||||||
|
request_size_ += kPatchMetadataSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read data from client file into |data|. Be sure to restore the previous
|
||||||
|
// file offset as the chunker might still be processing the file.
|
||||||
|
size_t prev_size = data->size();
|
||||||
|
data->resize(prev_size + size);
|
||||||
|
int64_t prev_offset = ftell64(file_);
|
||||||
|
if (fseek64(file_, file_offset_, SEEK_SET) != 0 ||
|
||||||
|
fread(&(*data)[prev_size], 1, size, file_) != size ||
|
||||||
|
fseek64(file_, prev_offset, SEEK_SET) != 0) {
|
||||||
|
return MakeStatus("Failed to read %u bytes at offset %u", size,
|
||||||
|
file_offset_);
|
||||||
|
}
|
||||||
|
request_size_ += size;
|
||||||
|
|
||||||
|
return OnChunkAdded(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status OnChunkAdded(uint32_t size) {
|
||||||
|
file_offset_ += size;
|
||||||
|
|
||||||
|
// Send patch commands if there's enough data.
|
||||||
|
if (request_size_ > kPatchRequestSizeThreshold) {
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_->SendMessage(PacketType::kAddPatchCommands, request_);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send patch commands");
|
||||||
|
}
|
||||||
|
total_request_size_ += request_size_;
|
||||||
|
request_size_ = 0;
|
||||||
|
request_.Clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
FILE* file_;
|
||||||
|
MessagePump* message_pump_;
|
||||||
|
|
||||||
|
AddPatchCommandsRequest request_;
|
||||||
|
size_t request_size_ = 0;
|
||||||
|
size_t total_request_size_ = 0;
|
||||||
|
uint64_t file_offset_ = 0;
|
||||||
|
size_t curr_chunk_idx_ = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
CdcInterface::CdcInterface(MessagePump* message_pump)
|
||||||
|
: message_pump_(message_pump) {}
|
||||||
|
|
||||||
|
absl::Status CdcInterface::CreateAndSendSignature(const std::string& filepath) {
|
||||||
|
absl::StatusOr<FILE*> file = path::OpenFile(filepath, "rb");
|
||||||
|
if (!file.ok()) {
|
||||||
|
return file.status();
|
||||||
|
}
|
||||||
|
#if PLATFORM_LINUX
|
||||||
|
// Tell the kernel we'll load the file sequentially (improves IO bandwidth).
|
||||||
|
posix_fadvise(fileno(*file), 0, 0, POSIX_FADV_SEQUENTIAL);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Use a background thread for computing hashes on the server.
|
||||||
|
// Allocate lazily since it is not needed on the client.
|
||||||
|
// MUST NOT use more than 1 worker thread since the order of finished tasks
|
||||||
|
// would then not necessarily match the pushing order, and the order is
|
||||||
|
// important for computing offsets.
|
||||||
|
if (!hash_pool_) hash_pool_ = std::make_unique<Threadpool>(1);
|
||||||
|
|
||||||
|
// |chunk_handler| is called for each CDC chunk. It pushes a hash task to the
|
||||||
|
// pool. Tasks are "recycled" from |free_tasks_|, so that buffers don't have
|
||||||
|
// to be reallocated constantly.
|
||||||
|
size_t num_hash_tasks = 0;
|
||||||
|
auto chunk_handler = [pool = hash_pool_.get(), &num_hash_tasks,
|
||||||
|
free_tasks = &free_tasks_](const void* data,
|
||||||
|
size_t size) {
|
||||||
|
++num_hash_tasks;
|
||||||
|
if (free_tasks->empty()) {
|
||||||
|
free_tasks->push_back(std::make_unique<HashTask>());
|
||||||
|
}
|
||||||
|
std::unique_ptr<Task> task = std::move(free_tasks->back());
|
||||||
|
free_tasks->pop_back();
|
||||||
|
static_cast<HashTask*>(task.get())->SetData(data, size);
|
||||||
|
pool->QueueTask(std::move(task));
|
||||||
|
};
|
||||||
|
|
||||||
|
fastcdc::Config config(kMinChunkSize, kAvgChunkSize, kMaxChunkSize);
|
||||||
|
fastcdc::Chunker chunker(config, chunk_handler);
|
||||||
|
|
||||||
|
AddSignaturesResponse response;
|
||||||
|
auto read_handler = [&chunker, &response, pool = hash_pool_.get(),
|
||||||
|
&num_hash_tasks, free_tasks = &free_tasks_,
|
||||||
|
message_pump = message_pump_](const void* data,
|
||||||
|
size_t size) {
|
||||||
|
chunker.Process(static_cast<const uint8_t*>(data), size);
|
||||||
|
|
||||||
|
// Finish hashing tasks. Block if there are too many of them in flight.
|
||||||
|
for (;;) {
|
||||||
|
std::unique_ptr<Task> task = num_hash_tasks >= kMaxNumHashTasks
|
||||||
|
? pool->GetCompletedTask()
|
||||||
|
: pool->TryGetCompletedTask();
|
||||||
|
if (!task) break;
|
||||||
|
num_hash_tasks--;
|
||||||
|
static_cast<HashTask*>(task.get())->AppendHash(&response);
|
||||||
|
free_tasks->push_back(std::move(task));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send data if we have enough chunks.
|
||||||
|
if (response.sizes_size() >= kMinNumChunksPerBatch) {
|
||||||
|
absl::Status status =
|
||||||
|
message_pump->SendMessage(PacketType::kAddSignatures, response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send signatures");
|
||||||
|
}
|
||||||
|
response.Clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
};
|
||||||
|
|
||||||
|
absl::Status status =
|
||||||
|
path::StreamReadFileContents(*file, kFileIoBufferSize, read_handler);
|
||||||
|
fclose(*file);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to compute signatures");
|
||||||
|
}
|
||||||
|
chunker.Finalize();
|
||||||
|
|
||||||
|
// Finish hashing tasks.
|
||||||
|
hash_pool_->Wait();
|
||||||
|
std::unique_ptr<Task> task = hash_pool_->TryGetCompletedTask();
|
||||||
|
while (task) {
|
||||||
|
static_cast<HashTask*>(task.get())->AppendHash(&response);
|
||||||
|
free_tasks_.push_back(std::move(task));
|
||||||
|
task = hash_pool_->TryGetCompletedTask();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send the remaining chunks, if any.
|
||||||
|
if (response.sizes_size() > 0) {
|
||||||
|
status = message_pump_->SendMessage(PacketType::kAddSignatures, response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send final signatures");
|
||||||
|
}
|
||||||
|
response.Clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send an empty response as EOF marker.
|
||||||
|
status = message_pump_->SendMessage(PacketType::kAddSignatures, response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send signatures EOF marker");
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcInterface::ReceiveSignatureAndCreateAndSendDiff(
|
||||||
|
FILE* file, ReportCdcProgress* progress) {
|
||||||
|
//
|
||||||
|
// Compute signatures from client |file| and send patches while receiving
|
||||||
|
// server signatures.
|
||||||
|
//
|
||||||
|
std::vector<Chunk> client_chunks;
|
||||||
|
ServerChunkReceiver server_chunk_receiver(message_pump_);
|
||||||
|
PatchSender patch_sender(file, message_pump_);
|
||||||
|
|
||||||
|
auto chunk_handler = [&client_chunks](const void* data, size_t size) {
|
||||||
|
client_chunks.emplace_back(ComputeHash(data, size),
|
||||||
|
static_cast<uint32_t>(size));
|
||||||
|
};
|
||||||
|
|
||||||
|
fastcdc::Config config(kMinChunkSize, kAvgChunkSize, kMaxChunkSize);
|
||||||
|
fastcdc::Chunker chunker(config, chunk_handler);
|
||||||
|
|
||||||
|
uint64_t file_size = 0;
|
||||||
|
auto read_handler = [&chunker, &client_chunks, &server_chunk_receiver,
|
||||||
|
&file_size, progress,
|
||||||
|
&patch_sender](const void* data, size_t size) {
|
||||||
|
// Process client chunks for the data read.
|
||||||
|
chunker.Process(static_cast<const uint8_t*>(data), size);
|
||||||
|
file_size += size;
|
||||||
|
|
||||||
|
const bool all_client_chunks_read = data == nullptr;
|
||||||
|
if (all_client_chunks_read) {
|
||||||
|
chunker.Finalize();
|
||||||
|
}
|
||||||
|
|
||||||
|
do {
|
||||||
|
// Receive any server chunks available.
|
||||||
|
uint64_t num_server_bytes_processed = 0;
|
||||||
|
absl::Status status = server_chunk_receiver.Receive(
|
||||||
|
/*block=*/all_client_chunks_read, &num_server_bytes_processed);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive server chunks");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to send patch data.
|
||||||
|
uint64_t num_client_bytes_processed = 0;
|
||||||
|
status = patch_sender.TryAddChunks(client_chunks, server_chunk_receiver,
|
||||||
|
&num_client_bytes_processed);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send patch data");
|
||||||
|
}
|
||||||
|
|
||||||
|
progress->ReportSyncProgress(num_client_bytes_processed,
|
||||||
|
num_server_bytes_processed);
|
||||||
|
} while (all_client_chunks_read &&
|
||||||
|
(!server_chunk_receiver.AllChunksReceived() ||
|
||||||
|
patch_sender.CurrChunkIdx() < client_chunks.size()));
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
};
|
||||||
|
|
||||||
|
absl::Status status =
|
||||||
|
path::StreamReadFileContents(file, kFileIoBufferSize, read_handler);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to stream file");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should have sent all client chunks by now.
|
||||||
|
assert(patch_sender.CurrChunkIdx() == client_chunks.size());
|
||||||
|
|
||||||
|
// Flush remaining patches.
|
||||||
|
status = patch_sender.Flush();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to flush patches");
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status CdcInterface::ReceiveDiffAndPatch(
|
||||||
|
const std::string& basis_filepath, FILE* patched_file,
|
||||||
|
bool* is_executable) {
|
||||||
|
Buffer buffer;
|
||||||
|
*is_executable = false;
|
||||||
|
|
||||||
|
absl::StatusOr<FILE*> basis_file = path::OpenFile(basis_filepath, "rb");
|
||||||
|
if (!basis_file.ok()) {
|
||||||
|
return basis_file.status();
|
||||||
|
}
|
||||||
|
#if PLATFORM_LINUX
|
||||||
|
// Tell the kernel we'll load the file sequentially (improves IO bandwidth).
|
||||||
|
// It is not strictly true that the basis file is accessed sequentially, but
|
||||||
|
// this should be the case for large parts of the file.
|
||||||
|
posix_fadvise(fileno(*basis_file), 0, 0, POSIX_FADV_SEQUENTIAL);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
bool first_chunk = true;
|
||||||
|
for (;;) {
|
||||||
|
AddPatchCommandsRequest request;
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_->ReceiveMessage(PacketType::kAddPatchCommands, &request);
|
||||||
|
if (!status.ok()) {
|
||||||
|
fclose(*basis_file);
|
||||||
|
return WrapStatus(status, "Failed to receive AddPatchCommandsRequest");
|
||||||
|
}
|
||||||
|
|
||||||
|
// All arrays must be of the same size.
|
||||||
|
int num_chunks = request.sources_size();
|
||||||
|
if (num_chunks != request.offsets_size() ||
|
||||||
|
num_chunks != request.sizes_size()) {
|
||||||
|
fclose(*basis_file);
|
||||||
|
return MakeStatus(
|
||||||
|
"Corrupted patch command arrays: Expected sizes %i. Actual %i/%i.",
|
||||||
|
num_chunks, request.offsets_size(), request.sizes_size());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (num_chunks == 0) {
|
||||||
|
// A zero-size request marks the end of patch commands.
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int n = 0; n < num_chunks; ++n) {
|
||||||
|
AddPatchCommandsRequest::Source source = request.sources(n);
|
||||||
|
uint64_t chunk_offset = request.offsets(n);
|
||||||
|
uint32_t chunk_size = request.sizes(n);
|
||||||
|
|
||||||
|
const char* chunk_data = nullptr;
|
||||||
|
if (source == AddPatchCommandsRequest::SOURCE_BASIS_FILE) {
|
||||||
|
// Copy [chunk_offset, chunk_offset + chunk_size) from |basis_file|.
|
||||||
|
buffer.resize(chunk_size);
|
||||||
|
if (fseek64(*basis_file, chunk_offset, SEEK_SET) != 0 ||
|
||||||
|
fread(buffer.data(), 1, chunk_size, *basis_file) != chunk_size) {
|
||||||
|
fclose(*basis_file);
|
||||||
|
return MakeStatus(
|
||||||
|
"Failed to read %u bytes at offset %u from basis file",
|
||||||
|
chunk_size, chunk_offset);
|
||||||
|
}
|
||||||
|
chunk_data = buffer.data();
|
||||||
|
} else {
|
||||||
|
// Write [chunk_offset, chunk_offset + chunk_size) from request data.
|
||||||
|
assert(source == AddPatchCommandsRequest::SOURCE_DATA);
|
||||||
|
if (request.data().size() < chunk_offset + chunk_size) {
|
||||||
|
fclose(*basis_file);
|
||||||
|
return MakeStatus(
|
||||||
|
"Insufficient data in patch commands. Required %u. Actual %u.",
|
||||||
|
chunk_offset + chunk_size, request.data().size());
|
||||||
|
}
|
||||||
|
chunk_data = &request.data()[chunk_offset];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (first_chunk && chunk_size > 0) {
|
||||||
|
first_chunk = false;
|
||||||
|
*is_executable = Util::IsExecutable(chunk_data, chunk_size);
|
||||||
|
}
|
||||||
|
if (fwrite(chunk_data, 1, chunk_size, patched_file) != chunk_size) {
|
||||||
|
fclose(*basis_file);
|
||||||
|
return MakeStatus("Failed to write %u bytes to patched file",
|
||||||
|
chunk_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fclose(*basis_file);
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
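The merging logic in PatchSender::AddExistingChunk above can be summarized by the following self-contained C++ sketch (illustrative only, not part of the repository): PatchCommand, AddBasisChunk and size_threshold are made-up names standing in for the AddPatchCommandsRequest fields and the kCombinedChunkSizeThreshold constant used by the real code.

#include <cstdint>
#include <vector>

// A single patch command covering a contiguous span of the basis file.
struct PatchCommand {
  uint64_t offset = 0;  // Offset of the span in the basis file.
  uint64_t size = 0;    // Combined size of the coalesced chunks.
};

// Appends a matched chunk, merging it into the previous command when the data
// is contiguous and the previous command is still below |size_threshold|.
void AddBasisChunk(std::vector<PatchCommand>* commands, uint64_t offset,
                   uint32_t size, uint64_t size_threshold) {
  if (!commands->empty()) {
    PatchCommand& last = commands->back();
    if (last.offset + last.size == offset && last.size < size_threshold) {
      last.size += size;  // Same source, contiguous data -> grow last command.
      return;
    }
  }
  commands->push_back({offset, size});  // Otherwise start a new command.
}

Each merge saves one patch-command entry (1 byte source + 8 bytes offset + 4 bytes size) in the request sent to the server.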
73
cdc_rsync/base/cdc_interface.h
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_BASE_CDC_INTERFACE_H_
|
||||||
|
#define CDC_RSYNC_BASE_CDC_INTERFACE_H_
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "common/threadpool.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class MessagePump;
|
||||||
|
|
||||||
|
class ReportCdcProgress {
|
||||||
|
public:
|
||||||
|
virtual ~ReportCdcProgress() = default;
|
||||||
|
virtual void ReportSyncProgress(size_t num_client_bytes_processed,
|
||||||
|
size_t num_server_bytes_processed) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Creates signatures and diffs, and patches files. Abstraction layer for fastcdc
|
||||||
|
// chunking and blake3 hashing.
|
||||||
|
class CdcInterface {
|
||||||
|
public:
|
||||||
|
explicit CdcInterface(MessagePump* message_pump);
|
||||||
|
|
||||||
|
// Creates the signature of the file at |filepath| and sends it to the socket.
|
||||||
|
// Typically called on the server.
|
||||||
|
absl::Status CreateAndSendSignature(const std::string& filepath);
|
||||||
|
|
||||||
|
// Receives the signature of the server-side file from the socket, creates diff
|
||||||
|
// data using the signature and the file, and sends the diffs to the socket.
|
||||||
|
// Typically called on the client.
|
||||||
|
absl::Status ReceiveSignatureAndCreateAndSendDiff(
|
||||||
|
FILE* file, ReportCdcProgress* progress);
|
||||||
|
|
||||||
|
// Receives diffs from the socket and patches the file at |basis_filepath|.
|
||||||
|
// The patched data is written to |patched_file|, which must be open in "wb"
|
||||||
|
// mode. Sets |is_executable| to true if the patched file is an executable
|
||||||
|
// (based on magic headers).
|
||||||
|
// Typically called on the server.
|
||||||
|
absl::Status ReceiveDiffAndPatch(const std::string& basis_filepath,
|
||||||
|
FILE* patched_file, bool* is_executable);
|
||||||
|
|
||||||
|
private:
|
||||||
|
MessagePump* const message_pump_;
|
||||||
|
|
||||||
|
// Thread pool for computing chunk hashes.
|
||||||
|
std::unique_ptr<Threadpool> hash_pool_;
|
||||||
|
|
||||||
|
// List of unused hash computation tasks. Tasks are reused by the hash pool
|
||||||
|
// in order to prevent buffer reallocation.
|
||||||
|
std::vector<std::unique_ptr<Task>> free_tasks_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_BASE_CDC_INTERFACE_H_
|
||||||
118
cdc_rsync/base/cdc_interface_test.cc
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/cdc_interface.h"
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/fake_socket.h"
|
||||||
|
#include "cdc_rsync/base/message_pump.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/status_test_macros.h"
|
||||||
|
#include "common/test_main.h"
|
||||||
|
#include "gtest/gtest.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class FakeCdcProgress : public ReportCdcProgress {
|
||||||
|
public:
|
||||||
|
void ReportSyncProgress(uint64_t num_client_bytes_processed,
|
||||||
|
uint64_t num_server_bytes_processed) override {
|
||||||
|
total_client_bytes_processed += num_client_bytes_processed;
|
||||||
|
total_server_bytes_processed += num_server_bytes_processed;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t total_client_bytes_processed = 0;
|
||||||
|
uint64_t total_server_bytes_processed = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
class CdcInterfaceTest : public ::testing::Test {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
Log::Initialize(std::make_unique<ConsoleLog>(LogLevel::kInfo));
|
||||||
|
message_pump_.StartMessagePump();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TearDown() override {
|
||||||
|
socket_.ShutdownSendingEnd();
|
||||||
|
message_pump_.StopMessagePump();
|
||||||
|
Log::Shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
FakeSocket socket_;
|
||||||
|
MessagePump message_pump_{&socket_, MessagePump::PacketReceivedDelegate()};
|
||||||
|
|
||||||
|
std::string base_dir_ = GetTestDataDir("cdc_interface");
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_F(CdcInterfaceTest, SyncTest) {
|
||||||
|
CdcInterface cdc(&message_pump_);
|
||||||
|
FakeCdcProgress progress;
|
||||||
|
|
||||||
|
const std::string old_filepath = path::Join(base_dir_, "old_file.txt");
|
||||||
|
const std::string new_filepath = path::Join(base_dir_, "new_file.txt");
|
||||||
|
const std::string patched_filepath =
|
||||||
|
path::Join(base_dir_, "patched_file.txt");
|
||||||
|
|
||||||
|
path::Stats old_stats;
|
||||||
|
EXPECT_OK(path::GetStats(old_filepath, &old_stats));
|
||||||
|
|
||||||
|
path::Stats new_stats;
|
||||||
|
EXPECT_OK(path::GetStats(new_filepath, &new_stats));
|
||||||
|
|
||||||
|
// Create signature of old file and send it to the fake socket (it'll just
|
||||||
|
// send it to itself).
|
||||||
|
EXPECT_OK(cdc.CreateAndSendSignature(old_filepath));
|
||||||
|
|
||||||
|
// Receive the signature from the fake socket, generate the diff to the file
|
||||||
|
// at |new_filepath| and send it to the socket again.
|
||||||
|
absl::StatusOr<FILE*> new_file = path::OpenFile(new_filepath, "rb");
|
||||||
|
EXPECT_OK(new_file);
|
||||||
|
EXPECT_OK(cdc.ReceiveSignatureAndCreateAndSendDiff(*new_file, &progress));
|
||||||
|
fclose(*new_file);
|
||||||
|
|
||||||
|
// Receive the diff from the fake socket and create a patched file.
|
||||||
|
std::FILE* patched_file = std::tmpfile();
|
||||||
|
ASSERT_TRUE(patched_file != nullptr);
|
||||||
|
bool is_executable = false;
|
||||||
|
EXPECT_OK(
|
||||||
|
cdc.ReceiveDiffAndPatch(old_filepath, patched_file, &is_executable));
|
||||||
|
EXPECT_FALSE(is_executable);
|
||||||
|
|
||||||
|
// Read new file.
|
||||||
|
std::ifstream new_file_stream(new_filepath.c_str(), std::ios::binary);
|
||||||
|
std::vector<uint8_t> new_file_data(
|
||||||
|
std::istreambuf_iterator<char>(new_file_stream), {});
|
||||||
|
|
||||||
|
// Read patched file.
|
||||||
|
fseek(patched_file, 0, SEEK_END);
|
||||||
|
std::vector<uint8_t> patched_file_data(ftell(patched_file));
|
||||||
|
fseek(patched_file, 0, SEEK_SET);
|
||||||
|
fread(patched_file_data.data(), 1, patched_file_data.size(), patched_file);
|
||||||
|
|
||||||
|
// New and patched file should be equal now.
|
||||||
|
EXPECT_EQ(patched_file_data, new_file_data);
|
||||||
|
fclose(patched_file);
|
||||||
|
|
||||||
|
// Verify progress tracker.
|
||||||
|
EXPECT_EQ(progress.total_server_bytes_processed, old_stats.size);
|
||||||
|
EXPECT_EQ(progress.total_client_bytes_processed, new_stats.size);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace cdc_ft
|
||||||
70
cdc_rsync/base/fake_socket.cc
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/fake_socket.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
FakeSocket::FakeSocket() = default;
|
||||||
|
|
||||||
|
FakeSocket::~FakeSocket() = default;
|
||||||
|
|
||||||
|
absl::Status FakeSocket::Send(const void* buffer, size_t size) {
|
||||||
|
// Wait until we can send again.
|
||||||
|
std::unique_lock<std::mutex> suspend_lock(suspend_mutex_);
|
||||||
|
suspend_cv_.wait(suspend_lock, [this]() { return !sending_suspended_; });
|
||||||
|
suspend_lock.unlock();
|
||||||
|
|
||||||
|
std::unique_lock<std::mutex> lock(data_mutex_);
|
||||||
|
data_.append(static_cast<const char*>(buffer), size);
|
||||||
|
lock.unlock();
|
||||||
|
data_cv_.notify_all();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status FakeSocket::Receive(void* buffer, size_t size,
|
||||||
|
bool allow_partial_read,
|
||||||
|
size_t* bytes_received) {
|
||||||
|
*bytes_received = 0;
|
||||||
|
std::unique_lock<std::mutex> lock(data_mutex_);
|
||||||
|
data_cv_.wait(lock, [this, size, allow_partial_read]() {
|
||||||
|
return allow_partial_read || data_.size() >= size || shutdown_;
|
||||||
|
});
|
||||||
|
if (shutdown_) {
|
||||||
|
return absl::UnavailableError("Pipe is shut down");
|
||||||
|
}
|
||||||
|
size_t to_copy = std::min(size, data_.size());
|
||||||
|
memcpy(buffer, data_.data(), to_copy);
|
||||||
|
*bytes_received = to_copy;
|
||||||
|
|
||||||
|
// This is horribly inefficient, but should be OK in a fake.
|
||||||
|
data_.erase(0, to_copy);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
void FakeSocket::ShutdownSendingEnd() {
|
||||||
|
std::unique_lock<std::mutex> lock(data_mutex_);
|
||||||
|
shutdown_ = true;
|
||||||
|
lock.unlock();
|
||||||
|
data_cv_.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
void FakeSocket::SuspendSending(bool suspended) {
|
||||||
|
std::unique_lock<std::mutex> lock(suspend_mutex_);
|
||||||
|
sending_suspended_ = suspended;
|
||||||
|
lock.unlock();
|
||||||
|
suspend_cv_.notify_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
57
cdc_rsync/base/fake_socket.h
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_BASE_FAKE_SOCKET_H_
|
||||||
|
#define CDC_RSYNC_BASE_FAKE_SOCKET_H_
|
||||||
|
|
||||||
|
#include <condition_variable>
|
||||||
|
#include <mutex>
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "cdc_rsync/base/socket.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Fake socket that receives the same data it sends.
|
||||||
|
class FakeSocket : public Socket {
|
||||||
|
public:
|
||||||
|
FakeSocket();
|
||||||
|
~FakeSocket();
|
||||||
|
|
||||||
|
// Socket:
|
||||||
|
absl::Status Send(const void* buffer, size_t size) override; // thread-safe
|
||||||
|
absl::Status Receive(void* buffer, size_t size, bool allow_partial_read,
|
||||||
|
size_t* bytes_received) override; // thread-safe
|
||||||
|
|
||||||
|
void ShutdownSendingEnd();
|
||||||
|
|
||||||
|
// If set to true, blocks on Send() until it is set to false again.
|
||||||
|
void SuspendSending(bool suspended);
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::mutex data_mutex_;
|
||||||
|
std::condition_variable data_cv_;
|
||||||
|
std::string data_;
|
||||||
|
bool shutdown_ = false;
|
||||||
|
|
||||||
|
bool sending_suspended_ = false;
|
||||||
|
std::mutex suspend_mutex_;
|
||||||
|
std::condition_variable suspend_cv_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_BASE_FAKE_SOCKET_H_
|
||||||
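A short usage sketch for the fake socket declared above (illustrative only, not part of the repository): bytes passed to Send() come back out of Receive(), which is what lets cdc_interface_test.cc drive both the client and server sides of the protocol in a single process. FakeSocketRoundTrip is a made-up helper name.

#include <cassert>
#include <cstddef>
#include <cstring>

#include "absl/status/status.h"
#include "cdc_rsync/base/fake_socket.h"

// Sends a small buffer and reads the same bytes back from the loopback fake.
void FakeSocketRoundTrip() {
  cdc_ft::FakeSocket socket;

  const char msg[] = "hello";
  absl::Status status = socket.Send(msg, sizeof(msg));
  assert(status.ok());

  char received[sizeof(msg)] = {};
  size_t bytes_received = 0;
  status = socket.Receive(received, sizeof(received),
                          /*allow_partial_read=*/false, &bytes_received);
  assert(status.ok());
  assert(bytes_received == sizeof(msg));
  assert(std::memcmp(received, msg, sizeof(msg)) == 0);
  (void)status;  // Avoid an unused-variable warning when NDEBUG is defined.
}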
473
cdc_rsync/base/message_pump.cc
Normal file
@@ -0,0 +1,473 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/message_pump.h"
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "cdc_rsync/base/socket.h"
|
||||||
|
#include "common/buffer.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "google/protobuf/message_lite.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Max total size of messages in the packet queues.
|
||||||
|
// If exceeded, Send/Receive methods start blocking.
|
||||||
|
uint64_t kInOutBufferSize = 1024 * 1024 * 8;
|
||||||
|
|
||||||
|
// Header is 1 byte type, 3 bytes size.
|
||||||
|
constexpr size_t kHeaderSize = 4;
|
||||||
|
|
||||||
|
// Size is compressed to 3 bytes.
|
||||||
|
constexpr uint32_t kMaxPacketSize = 256 * 256 * 256 - 1;
|
||||||
|
|
||||||
|
// Creates a packet of size |kHeaderSize| + |size| and sets the header.
|
||||||
|
absl::Status CreateSerializedPacket(PacketType type, size_t size,
|
||||||
|
Buffer* serialized_packet) {
|
||||||
|
if (size > kMaxPacketSize) {
|
||||||
|
return MakeStatus("Max packet size exceeded: %u", size);
|
||||||
|
}
|
||||||
|
|
||||||
|
serialized_packet->clear();
|
||||||
|
serialized_packet->reserve(kHeaderSize + size);
|
||||||
|
|
||||||
|
// Header is 1 byte type, 3 bytes size.
|
||||||
|
static_assert(static_cast<size_t>(PacketType::kCount) <= 256, "");
|
||||||
|
static_assert(kMaxPacketSize < 256 * 256 * 256, "");
|
||||||
|
static_assert(kHeaderSize == 4, "");
|
||||||
|
|
||||||
|
uint8_t header[] = {static_cast<uint8_t>(type),
|
||||||
|
static_cast<uint8_t>(size & 0xFF),
|
||||||
|
static_cast<uint8_t>((size >> 8) & 0xFF),
|
||||||
|
static_cast<uint8_t>((size >> 16) & 0xFF)};
|
||||||
|
serialized_packet->append(header, sizeof(header));
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define HANDLE_PACKET_TYPE(type) \
|
||||||
|
case PacketType::type: \
|
||||||
|
return #type;
|
||||||
|
|
||||||
|
const char* PacketTypeName(PacketType type) {
|
||||||
|
if (type > PacketType::kCount) {
|
||||||
|
return "<unknown>";
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (type) {
|
||||||
|
HANDLE_PACKET_TYPE(kRawData)
|
||||||
|
HANDLE_PACKET_TYPE(kTest)
|
||||||
|
HANDLE_PACKET_TYPE(kSetOptions)
|
||||||
|
HANDLE_PACKET_TYPE(kToggleCompression)
|
||||||
|
HANDLE_PACKET_TYPE(kAddFiles)
|
||||||
|
HANDLE_PACKET_TYPE(kSendFileStats)
|
||||||
|
HANDLE_PACKET_TYPE(kAddFileIndices)
|
||||||
|
HANDLE_PACKET_TYPE(kSendMissingFileData)
|
||||||
|
HANDLE_PACKET_TYPE(kAddSignatures)
|
||||||
|
HANDLE_PACKET_TYPE(kAddPatchCommands)
|
||||||
|
HANDLE_PACKET_TYPE(kAddDeletedFiles)
|
||||||
|
HANDLE_PACKET_TYPE(kShutdown)
|
||||||
|
HANDLE_PACKET_TYPE(kCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "<unknown>";
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef HANDLE_PACKET_TYPE
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
MessagePump::MessagePump(Socket* socket, PacketReceivedDelegate packet_received)
|
||||||
|
: socket_(socket),
|
||||||
|
packet_received_(packet_received),
|
||||||
|
creation_thread_id_(std::this_thread::get_id()) {
|
||||||
|
assert(socket_ != nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
MessagePump::~MessagePump() { StopMessagePump(); }
|
||||||
|
|
||||||
|
void MessagePump::StartMessagePump() {
|
||||||
|
assert(creation_thread_id_ == std::this_thread::get_id());
|
||||||
|
|
||||||
|
message_sender_thread_ = std::thread([this]() { ThreadSenderMain(); });
|
||||||
|
message_receiver_thread_ = std::thread([this]() { ThreadReceiverMain(); });
|
||||||
|
}
|
||||||
|
|
||||||
|
void MessagePump::StopMessagePump() {
|
||||||
|
assert(creation_thread_id_ == std::this_thread::get_id());
|
||||||
|
|
||||||
|
if (shutdown_) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
FlushOutgoingQueue();
|
||||||
|
|
||||||
|
{
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
shutdown_ = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (message_sender_thread_.joinable()) {
|
||||||
|
message_sender_thread_.join();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (message_receiver_thread_.joinable()) {
|
||||||
|
message_receiver_thread_.join();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::SendRawData(const void* data, size_t size) {
|
||||||
|
Buffer serialized_packet;
|
||||||
|
absl::Status status =
|
||||||
|
CreateSerializedPacket(PacketType::kRawData, size, &serialized_packet);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
const uint8_t* u8_data = static_cast<const uint8_t*>(data);
|
||||||
|
serialized_packet.append(u8_data, size);
|
||||||
|
return QueuePacket(std::move(serialized_packet));
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::SendMessage(
|
||||||
|
PacketType type, const google::protobuf::MessageLite& message) {
|
||||||
|
Buffer serialized_packet;
|
||||||
|
size_t size = message.ByteSizeLong();
|
||||||
|
absl::Status status = CreateSerializedPacket(type, size, &serialized_packet);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize the message directly into the packet.
|
||||||
|
serialized_packet.resize(kHeaderSize + size);
|
||||||
|
if (size > 0 &&
|
||||||
|
!message.SerializeToArray(serialized_packet.data() + kHeaderSize,
|
||||||
|
static_cast<int>(size))) {
|
||||||
|
return MakeStatus("Failed to serialize message to array");
|
||||||
|
}
|
||||||
|
|
||||||
|
return QueuePacket(std::move(serialized_packet));
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::QueuePacket(Buffer&& serialize_packet) {
|
||||||
|
// Wait a little if the max queue size is exceeded.
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
auto cond = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(outgoing_mutex_) {
|
||||||
|
return outgoing_packets_byte_size_ < kInOutBufferSize || send_error_ ||
|
||||||
|
receive_error_;
|
||||||
|
};
|
||||||
|
outgoing_mutex_.Await(absl::Condition(&cond));
|
||||||
|
|
||||||
|
// There could be a race where send_error_ is set to true after this, but
|
||||||
|
// that's OK.
|
||||||
|
if (send_error_ || receive_error_) {
|
||||||
|
absl::MutexLock status_lock(&status_mutex_);
|
||||||
|
return WrapStatus(status_,
|
||||||
|
"Failed to send packet. Message pump thread is down");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put packet into outgoing queue.
|
||||||
|
outgoing_packets_byte_size_ += serialize_packet.size();
|
||||||
|
outgoing_packets_.push(std::move(serialize_packet));
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::ThreadDoSendPacket(Buffer&& serialized_packet) {
|
||||||
|
if (receive_error_) {
|
||||||
|
// Just eat the packet if there was a receive error as the other side is
|
||||||
|
// probably down and won't read packets anymore.
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (output_handler_) {
|
||||||
|
// Redirect output, don't send to socket.
|
||||||
|
absl::Status status =
|
||||||
|
output_handler_(serialized_packet.data(), serialized_packet.size());
|
||||||
|
return WrapStatus(status, "Output handler failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status status =
|
||||||
|
socket_->Send(serialized_packet.data(), serialized_packet.size());
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send packet of size %u",
|
||||||
|
serialized_packet.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_VERBOSE("Sent packet of size %u (total buffer: %u)",
|
||||||
|
serialized_packet.size(), outgoing_packets_byte_size_.load());
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::ReceiveRawData(Buffer* data) {
|
||||||
|
Packet packet;
|
||||||
|
absl::Status status = DequeuePacket(&packet);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to dequeue packet");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (packet.type != PacketType::kRawData) {
|
||||||
|
return MakeStatus("Unexpected packet type %s. Expected kRawData.",
|
||||||
|
PacketTypeName(packet.type));
|
||||||
|
}
|
||||||
|
|
||||||
|
*data = std::move(packet.data);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::ReceiveMessage(
|
||||||
|
PacketType type, google::protobuf::MessageLite* message) {
|
||||||
|
Packet packet;
|
||||||
|
absl::Status status = DequeuePacket(&packet);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to dequeue packet");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (packet.type != type) {
|
||||||
|
return MakeStatus("Unexpected packet type %s. Expected %s.",
|
||||||
|
PacketTypeName(packet.type), PacketTypeName(type));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!message->ParseFromArray(packet.data.data(),
|
||||||
|
static_cast<int>(packet.data.size()))) {
|
||||||
|
return MakeStatus("Failed to parse packet of type %s and size %u",
|
||||||
|
PacketTypeName(packet.type), packet.data.size());
|
||||||
|
}
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::DequeuePacket(Packet* packet) {
|
||||||
|
// Wait for a packet to be available.
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
auto cond = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(incoming_mutex_) {
|
||||||
|
return !incoming_packets_.empty() || send_error_ || receive_error_;
|
||||||
|
};
|
||||||
|
incoming_mutex_.Await(absl::Condition(&cond));
|
||||||
|
|
||||||
|
// If receive_error_ is true, do not return an error until |incoming_packets_|
|
||||||
|
// is empty and all valid packets have been returned. This way, the error
|
||||||
|
// shows up for the packet that failed to be received.
|
||||||
|
if (send_error_ || (receive_error_ && incoming_packets_.empty())) {
|
||||||
|
absl::MutexLock status_lock(&status_mutex_);
|
||||||
|
return WrapStatus(status_, "Message pump thread is down");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grab packet from incoming queue.
|
||||||
|
*packet = std::move(incoming_packets_.front());
|
||||||
|
incoming_packets_.pop();
|
||||||
|
|
||||||
|
// Update byte size.
|
||||||
|
incoming_packets_byte_size_ -= kHeaderSize + packet->data.size();
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::ThreadDoReceivePacket(Packet* packet) {
|
||||||
|
// Read type and size in one go for performance reasons.
|
||||||
|
uint8_t header[kHeaderSize];
|
||||||
|
absl::Status status = ThreadDoReceive(&header, kHeaderSize);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive packet of size %u",
|
||||||
|
kHeaderSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert(kHeaderSize == 4, "");
|
||||||
|
|
||||||
|
uint8_t packet_type = header[0];
|
||||||
|
uint32_t packet_size = static_cast<uint32_t>(header[1]) |
|
||||||
|
(static_cast<uint32_t>(header[2]) << 8) |
|
||||||
|
(static_cast<uint32_t>(header[3]) << 16);
|
||||||
|
|
||||||
|
if (packet_type >= static_cast<uint8_t>(PacketType::kCount)) {
|
||||||
|
return MakeStatus("Invalid packet type: %u", packet_type);
|
||||||
|
}
|
||||||
|
packet->type = static_cast<PacketType>(packet_type);
|
||||||
|
|
||||||
|
if (packet_size > kMaxPacketSize) {
|
||||||
|
return MakeStatus("Max packet size exceeded: %u", packet_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
packet->data.resize(packet_size);
|
||||||
|
status = ThreadDoReceive(packet->data.data(), packet_size);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to read packet data of size %u",
|
||||||
|
packet_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_VERBOSE("Received packet of size %u (total buffer: %u)", packet_size,
|
||||||
|
incoming_packets_byte_size_.load());
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status MessagePump::ThreadDoReceive(void* buffer, size_t size) {
|
||||||
|
if (size == 0) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (input_reader_) {
|
||||||
|
size_t bytes_read = 0;
|
||||||
|
bool eof = false;
|
||||||
|
absl::Status status = input_reader_->Read(buffer, size, &bytes_read, &eof);
|
||||||
|
if (eof) {
|
||||||
|
input_reader_.reset();
|
||||||
|
}
|
||||||
|
if (!status.ok()) {
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
// |input_reader_| should read |size| bytes unless |eof| is hit.
|
||||||
|
assert(bytes_read == size || eof);
|
||||||
|
|
||||||
|
// Since this method never reads across packet boundaries and since packets
|
||||||
|
// should not be partially received through |input_reader_|, it is an error
|
||||||
|
// if there's a partial read on EOF.
|
||||||
|
if (eof && (bytes_read > 0 && bytes_read < size)) {
|
||||||
|
return MakeStatus("EOF after partial read of %u / %u bytes", bytes_read,
|
||||||
|
size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special case, might happen if |input_reader_| was an unzip stream and the
|
||||||
|
// last read stopped right before zlib's EOF marker. Fall through to reading
|
||||||
|
// uncompressed data in that case.
|
||||||
|
if (bytes_read == size) {
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(eof && bytes_read == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t unused;
|
||||||
|
return socket_->Receive(buffer, size, /*allow_partial_read=*/false, &unused);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MessagePump::FlushOutgoingQueue() {
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
auto cond = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(outgoing_mutex_) {
|
||||||
|
return outgoing_packets_byte_size_ == 0 || send_error_ || receive_error_;
|
||||||
|
};
|
||||||
|
outgoing_mutex_.Await(absl::Condition(&cond));
|
||||||
|
}
|
||||||
|
|
||||||
|
void MessagePump::RedirectInput(std::unique_ptr<InputReader> input_reader) {
|
||||||
|
assert(std::this_thread::get_id() == message_receiver_thread_.get_id());
|
||||||
|
assert(input_reader);
|
||||||
|
|
||||||
|
if (input_reader_) {
|
||||||
|
LOG_WARNING("Input reader already set");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
input_reader_ = std::move(input_reader);
|
||||||
|
}
|
||||||
|
|
||||||
|
void MessagePump::RedirectOutput(OutputHandler output_handler) {
|
||||||
|
FlushOutgoingQueue();
|
||||||
|
output_handler_ = std::move(output_handler);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t MessagePump::GetNumOutgoingPackagesForTesting() {
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
return outgoing_packets_.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t MessagePump::GetMaxInOutBufferSizeForTesting() {
|
||||||
|
return kInOutBufferSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t MessagePump::GetMaxPacketSizeForTesting() { return kMaxPacketSize; }
|
||||||
|
|
||||||
|
void MessagePump::ThreadSenderMain() {
|
||||||
|
while (!send_error_) {
|
||||||
|
Buffer serialized_packet;
|
||||||
|
size_t size;
|
||||||
|
{
|
||||||
|
// Wait for a packet to be available.
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
auto cond = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(outgoing_mutex_) {
|
||||||
|
return outgoing_packets_.size() > 0 || shutdown_;
|
||||||
|
};
|
||||||
|
outgoing_mutex_.Await(absl::Condition(&cond));
|
||||||
|
if (shutdown_) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grab packet from outgoing queue.
|
||||||
|
serialized_packet = std::move(outgoing_packets_.front());
|
||||||
|
size = serialized_packet.size();
|
||||||
|
outgoing_packets_.pop();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send data. This blocks until all data is submitted.
|
||||||
|
absl::Status status = ThreadDoSendPacket(std::move(serialized_packet));
|
||||||
|
if (!status.ok()) {
|
||||||
|
{
|
||||||
|
absl::MutexLock status_lock(&status_mutex_);
|
||||||
|
status_ = WrapStatus(status, "Failed to send packet");
|
||||||
|
}
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
send_error_ = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrease AFTER sending, this is important for FlushOutgoingQueue().
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
outgoing_packets_byte_size_ -= size;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void MessagePump::ThreadReceiverMain() {
|
||||||
|
while (!receive_error_) {
|
||||||
|
// Wait for a packet to be available.
|
||||||
|
{
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
auto cond = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(incoming_mutex_) {
|
||||||
|
return incoming_packets_byte_size_ < kInOutBufferSize || shutdown_;
|
||||||
|
};
|
||||||
|
incoming_mutex_.Await(absl::Condition(&cond));
|
||||||
|
if (shutdown_) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive packet. This blocks until data is available.
|
||||||
|
Packet packet;
|
||||||
|
absl::Status status = ThreadDoReceivePacket(&packet);
|
||||||
|
if (!status.ok()) {
|
||||||
|
{
|
||||||
|
absl::MutexLock status_lock(&status_mutex_);
|
||||||
|
status_ = WrapStatus(status, "Failed to receive packet");
|
||||||
|
}
|
||||||
|
absl::MutexLock outgoing_lock(&outgoing_mutex_);
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
receive_error_ = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (packet_received_) {
|
||||||
|
packet_received_(packet.type);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Queue the packet for receiving.
|
||||||
|
absl::MutexLock incoming_lock(&incoming_mutex_);
|
||||||
|
incoming_packets_byte_size_ += kHeaderSize + packet.data.size();
|
||||||
|
incoming_packets_.push(std::move(packet));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
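The 4-byte packet header used by the message pump above is 1 byte of packet type followed by a 3-byte little-endian payload size, which caps a single packet at 2^24 - 1 bytes. Below is a stand-alone sketch of the encode/decode logic from CreateSerializedPacket() and ThreadDoReceivePacket() (illustrative only; EncodeHeader and DecodeHeader are not functions from the repository).

#include <cassert>
#include <cstdint>

// Writes the 1-byte type and 3-byte little-endian size into |header|.
void EncodeHeader(uint8_t type, uint32_t size, uint8_t header[4]) {
  assert(size <= 0xFFFFFF);  // Mirrors kMaxPacketSize in message_pump.cc.
  header[0] = type;
  header[1] = static_cast<uint8_t>(size & 0xFF);
  header[2] = static_cast<uint8_t>((size >> 8) & 0xFF);
  header[3] = static_cast<uint8_t>((size >> 16) & 0xFF);
}

// Reads the type and size back out of a received header.
void DecodeHeader(const uint8_t header[4], uint8_t* type, uint32_t* size) {
  *type = header[0];
  *size = static_cast<uint32_t>(header[1]) |
          (static_cast<uint32_t>(header[2]) << 8) |
          (static_cast<uint32_t>(header[3]) << 16);
}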
275
cdc_rsync/base/message_pump.h
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_BASE_MESSAGE_PUMP_H_
|
||||||
|
#define CDC_RSYNC_BASE_MESSAGE_PUMP_H_
|
||||||
|
|
||||||
|
#include <queue>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#include "absl/base/thread_annotations.h"
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
#include "absl/synchronization/mutex.h"
|
||||||
|
#include "common/buffer.h"
|
||||||
|
|
||||||
|
namespace google {
|
||||||
|
namespace protobuf {
|
||||||
|
class MessageLite;
|
||||||
|
}
|
||||||
|
} // namespace google
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class Socket;
|
||||||
|
|
||||||
|
// See messages.proto. When sending a kXXXRequest from client to server or a
|
||||||
|
// kXXXResponse from server to client, use packet type kXXX.
|
||||||
|
enum class PacketType {
|
||||||
|
// Not a proto, just raw bytes.
|
||||||
|
kRawData = 0,
|
||||||
|
|
||||||
|
// Used for testing.
|
||||||
|
kTest,
|
||||||
|
|
||||||
|
// Send options to server.
|
||||||
|
kSetOptions,
|
||||||
|
|
||||||
|
// Toggle compression on/off.
|
||||||
|
kToggleCompression,
|
||||||
|
|
||||||
|
//
|
||||||
|
// Send all files from client to server.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Send file paths including timestamps and sizes, and directories to server.
|
||||||
|
// An empty request indicates that all data has been sent.
|
||||||
|
kAddFiles,
|
||||||
|
// Send stats about missing, excessive, changed and matching files to client.
|
||||||
|
kSendFileStats,
|
||||||
|
|
||||||
|
//
|
||||||
|
// Send all missing files from server to client.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Send indices of missing files to client.
|
||||||
|
// An empty request indicates that all data has been sent.
|
||||||
|
// Also used for sending indices of changed files.
|
||||||
|
kAddFileIndices,
|
||||||
|
|
||||||
|
// Start sending missing file data to the server. After each
|
||||||
|
// SendMissingFileDataRequest, the client sends file data as raw packets and
|
||||||
|
// an empty packet to indicate eof.
|
||||||
|
kSendMissingFileData,
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rsync data exchange.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Send signatures to client.
|
||||||
|
// An empty response indicates that all data has been sent.
|
||||||
|
kAddSignatures,
|
||||||
|
|
||||||
|
// Send patch commands to server.
|
||||||
|
// An empty request indicates that all data has been sent.
|
||||||
|
kAddPatchCommands,
|
||||||
|
|
||||||
|
//
|
||||||
|
// Deletion of extraneous files.
|
||||||
|
//
|
||||||
|
kAddDeletedFiles,
|
||||||
|
|
||||||
|
//
|
||||||
|
// Shutdown.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Ask the server to shut down. Also used for shutdown ack.
|
||||||
|
kShutdown,
|
||||||
|
|
||||||
|
// Must be last.
|
||||||
|
kCount
|
||||||
|
};
|
||||||
|
|
||||||
|
class MessagePump {
|
||||||
|
public:
|
||||||
|
using PacketReceivedDelegate = std::function<void(PacketType)>;
|
||||||
|
|
||||||
|
// |socket| is the underlying socket that data is sent to and received from,
|
||||||
|
// unless redirected with one of the Redirect* methods. |packet_received| is
|
||||||
|
// a callback that is called from the receiver thread as soon as a packet is
|
||||||
|
// received. RedirectInput() should be called from this delegate. Useful for
|
||||||
|
// things like decompression.
|
||||||
|
MessagePump(Socket* socket, PacketReceivedDelegate packet_received);
|
||||||
|
virtual ~MessagePump();
|
||||||
|
|
||||||
|
// Starts worker threads to send/receive messages. Should be called after the
|
||||||
|
// socket is connected. Must not already be started.
|
||||||
|
// NOT thread-safe. Should be called from the creation thread.
|
||||||
|
void StartMessagePump();
|
||||||
|
|
||||||
|
// Stops worker threads to send/receive messages. No-op if already stopped or
|
||||||
|
// not started. Cannot be restarted.
|
||||||
|
// NOT thread-safe. Should be called from the creation thread.
|
||||||
|
void StopMessagePump() ABSL_LOCKS_EXCLUDED(outgoing_mutex_, incoming_mutex_);
|
||||||
|
|
||||||
|
// Queues data for sending. May block if too much data is queued.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::Status SendRawData(const void* data, size_t size);
|
||||||
|
absl::Status SendMessage(PacketType type,
|
||||||
|
const google::protobuf::MessageLite& message);
|
||||||
|
|
||||||
|
// Receives a packet. Blocks if no packet is currently available.
|
||||||
|
// Thread-safe.
|
||||||
|
absl::Status ReceiveRawData(Buffer* data);
|
||||||
|
absl::Status ReceiveMessage(PacketType type,
|
||||||
|
google::protobuf::MessageLite* message);
|
||||||
|
|
||||||
|
// Returns true if the Receive* functions have data available. Note that
|
||||||
|
// receiving messages from multiple threads might be racy, i.e. if
|
||||||
|
// CanReceive() returns true and Receive* is called afterwards, the method
|
||||||
|
// might block if another thread has grabbed the packet in the meantime.
|
||||||
|
bool CanReceive() const { return incoming_packets_byte_size_ > 0; }
|
||||||
|
|
||||||
|
// Blocks until all outgoing messages have been sent. Does not prevent other
|
||||||
|
// threads from queuing new packets while the method is blocking, so for
|
||||||
|
// consistent behavior the caller should make sure that does not happen.
|
||||||
|
// Thread-safe.
|
||||||
|
void FlushOutgoingQueue() ABSL_LOCKS_EXCLUDED(outgoing_mutex_);
|
||||||
|
|
||||||
|
class InputReader {
|
||||||
|
public:
|
||||||
|
virtual ~InputReader() {}
|
||||||
|
|
||||||
|
// Reads as much data as possible into |out_buffer|, but no more than
|
||||||
|
// |out_size| bytes. Sets |bytes_read| to the number of bytes read.
|
||||||
|
// |eof| is set to true if no more input data is available. The flag
|
||||||
|
// indicates that the parent MessagePump should reset the input reader
|
||||||
|
// and read data from the socket again.
|
||||||
|
virtual absl::Status Read(void* out_buffer, size_t out_size,
|
||||||
|
size_t* bytes_read, bool* eof) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Starts receiving input from |input_reader| instead of from the socket.
|
||||||
|
// |input_reader| is called on a background thread. It must be a valid
|
||||||
|
// pointer. The input reader stays in place until it returns |eof| == true.
|
||||||
|
// After that, the input reader is reset and data is received from the socket
|
||||||
|
// again.
|
||||||
|
// This method must be called from the receiver thread, usually during the
|
||||||
|
// execution of the PacketReceivedDelegate passed in the constructor.
|
||||||
|
// Otherwise, the receiver thread might be blocked on a recv() call and the
|
||||||
|
// first data received would still be read from the socket.
|
||||||
|
void RedirectInput(std::unique_ptr<InputReader> input_reader);
|
||||||
|
|
||||||
|
// If set to a non-empty function, starts sending output to |output_handler|
|
||||||
|
// instead of to the socket. If set to an empty function, starts sending to
|
||||||
|
// the socket again. |output_handler| is called on a background thread.
|
||||||
|
// The outgoing packet queue is flushed prior to changing the output handler.
|
||||||
|
// The caller must make sure that no background threads are sending new
|
||||||
|
// messages while this method is running.
|
||||||
|
using OutputHandler =
|
||||||
|
std::function<absl::Status(const void* data, size_t size)>;
|
||||||
|
void RedirectOutput(OutputHandler output_handler);
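//
// Sketch of a pass-through handler (illustrative only; a real handler would
// e.g. compress |data| before forwarding it to the socket):
//
//   pump.RedirectOutput([socket](const void* data, size_t size) {
//     return socket->Send(data, size);
//   });
//   ...
//   // Restore sending to the socket.
//   pump.RedirectOutput(MessagePump::OutputHandler());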
|
||||||
|
|
||||||
|
// Returns the number of packets queued for sending.
|
||||||
|
size_t GetNumOutgoingPackagesForTesting()
|
||||||
|
ABSL_LOCKS_EXCLUDED(outgoing_mutex_);
|
||||||
|
|
||||||
|
// Returns the max total size of messages in the packet queues.
|
||||||
|
size_t GetMaxInOutBufferSizeForTesting();
|
||||||
|
|
||||||
|
// Returns the max size of a single raw or proto message (including header).
|
||||||
|
size_t GetMaxPacketSizeForTesting();
|
||||||
|
|
||||||
|
protected:
|
||||||
|
struct Packet {
|
||||||
|
PacketType type = PacketType::kCount;
|
||||||
|
Buffer data;
|
||||||
|
|
||||||
|
// Instances should be moved, not copied.
|
||||||
|
Packet() = default;
|
||||||
|
Packet(Packet&& other) { *this = std::move(other); }
|
||||||
|
Packet(const Packet&) = delete;
|
||||||
|
Packet& operator=(const Packet&) = delete;
|
||||||
|
|
||||||
|
Packet& operator=(Packet&& other) {
|
||||||
|
type = other.type;
|
||||||
|
data = std::move(other.data);
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Outgoing packets are already serialized to save memory copies.
|
||||||
|
absl::Status QueuePacket(Buffer&& serialized_packet)
|
||||||
|
ABSL_LOCKS_EXCLUDED(outgoing_mutex_, status_mutex_);
|
||||||
|
absl::Status DequeuePacket(Packet* packet)
|
||||||
|
ABSL_LOCKS_EXCLUDED(incoming_mutex_, status_mutex_);
|
||||||
|
|
||||||
|
// Underlying socket, not owned.
|
||||||
|
Socket* socket_;
|
||||||
|
|
||||||
|
// Delegate called if a packet was received.
|
||||||
|
// Called immediately from the receiver thread.
|
||||||
|
PacketReceivedDelegate packet_received_;
|
||||||
|
|
||||||
|
// Main methods of the message pump threads for sending and receiving data.
|
||||||
|
void ThreadSenderMain() ABSL_LOCKS_EXCLUDED(outgoing_mutex_, status_mutex_);
|
||||||
|
void ThreadReceiverMain() ABSL_LOCKS_EXCLUDED(incoming_mutex_, status_mutex_);
|
||||||
|
|
||||||
|
// Actually send/receive packets.
|
||||||
|
absl::Status ThreadDoSendPacket(Buffer&& serialized_packet);
|
||||||
|
absl::Status ThreadDoReceivePacket(Packet* packet);
|
||||||
|
absl::Status ThreadDoReceive(void* buffer, size_t size);
|
||||||
|
|
||||||
|
std::thread message_sender_thread_;
|
||||||
|
std::thread message_receiver_thread_;
|
||||||
|
|
||||||
|
// If set, input is not received from the socket, but from |input_reader_|.
|
||||||
|
std::unique_ptr<InputReader> input_reader_;
|
||||||
|
// If set, output is not sent to the socket, but to |output_handler_|.
|
||||||
|
OutputHandler output_handler_;
|
||||||
|
|
||||||
|
//
|
||||||
|
// Synchronization of message pump threads and main thread.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Guards to protect access to queued packets.
|
||||||
|
absl::Mutex outgoing_mutex_;
|
||||||
|
absl::Mutex incoming_mutex_ ABSL_ACQUIRED_AFTER(outgoing_mutex_);
|
||||||
|
|
||||||
|
// Queued packets.
|
||||||
|
std::queue<Buffer> outgoing_packets_ ABSL_GUARDED_BY(outgoing_mutex_);
|
||||||
|
std::queue<Packet> incoming_packets_ ABSL_GUARDED_BY(incoming_mutex_);
|
||||||
|
|
||||||
|
// Total size of queued packets. Used to limit max queue size.
|
||||||
|
std::atomic_uint64_t outgoing_packets_byte_size_{0};
|
||||||
|
std::atomic_uint64_t incoming_packets_byte_size_{0};
|
||||||
|
|
||||||
|
// If true, the respective thread saw an error and shut down.
|
||||||
|
std::atomic_bool send_error_{false};
|
||||||
|
std::atomic_bool receive_error_{false};
|
||||||
|
|
||||||
|
// Shutdown signal to sender and receiver threads.
|
||||||
|
std::atomic_bool shutdown_{false};
|
||||||
|
|
||||||
|
absl::Mutex status_mutex_;
|
||||||
|
absl::Status status_ ABSL_GUARDED_BY(status_mutex_);
|
||||||
|
|
||||||
|
std::thread::id creation_thread_id_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_BASE_MESSAGE_PUMP_H_
|
||||||
272
cdc_rsync/base/message_pump_test.cc
Normal file
272
cdc_rsync/base/message_pump_test.cc
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/message_pump.h"
|
||||||
|
|
||||||
|
#include "cdc_rsync/base/fake_socket.h"
|
||||||
|
#include "cdc_rsync/protos/messages.pb.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_test_macros.h"
|
||||||
|
#include "gtest/gtest.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class MessagePumpTest : public ::testing::Test {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
Log::Initialize(std::make_unique<ConsoleLog>(LogLevel::kInfo));
|
||||||
|
message_pump_.StartMessagePump();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TearDown() override {
|
||||||
|
fake_socket_.ShutdownSendingEnd();
|
||||||
|
message_pump_.StopMessagePump();
|
||||||
|
Log::Shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// Called on the receiver thread.
|
||||||
|
void ThreadPackageReceived(PacketType type) {
|
||||||
|
// Empty by default. Only takes effect if set by tests.
|
||||||
|
if (type == PacketType::kToggleCompression) {
|
||||||
|
message_pump_.RedirectInput(std::move(fake_compressed_input_reader_));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
FakeSocket fake_socket_;
|
||||||
|
MessagePump message_pump_{
|
||||||
|
&fake_socket_, [this](PacketType type) { ThreadPackageReceived(type); }};
|
||||||
|
std::unique_ptr<MessagePump::InputReader> fake_compressed_input_reader_;
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, SendReceiveRawData) {
|
||||||
|
// The FakeSocket just routes everything that's sent to the receiving end.
|
||||||
|
const Buffer raw_data = {'r', 'a', 'w'};
|
||||||
|
EXPECT_OK(message_pump_.SendRawData(raw_data.data(), raw_data.size()));
|
||||||
|
|
||||||
|
Buffer received_raw_data;
|
||||||
|
EXPECT_OK(message_pump_.ReceiveRawData(&received_raw_data));
|
||||||
|
|
||||||
|
EXPECT_EQ(raw_data, received_raw_data);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, SendReceiveMessage) {
|
||||||
|
TestRequest request;
|
||||||
|
request.set_message("message");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
|
||||||
|
TestRequest received_request;
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &received_request));
|
||||||
|
|
||||||
|
EXPECT_EQ(request.message(), received_request.message());
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, SendReceiveMultiple) {
|
||||||
|
const Buffer raw_data_1 = {'r', 'a', 'w', '1'};
|
||||||
|
const Buffer raw_data_2 = {'r', 'a', 'w', '2'};
|
||||||
|
TestRequest request;
|
||||||
|
request.set_message("message");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.SendRawData(raw_data_1.data(), raw_data_1.size()));
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
EXPECT_OK(message_pump_.SendRawData(raw_data_2.data(), raw_data_2.size()));
|
||||||
|
|
||||||
|
Buffer received_raw_data_1;
|
||||||
|
Buffer received_raw_data_2;
|
||||||
|
TestRequest received_request;
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveRawData(&received_raw_data_1));
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &received_request));
|
||||||
|
EXPECT_OK(message_pump_.ReceiveRawData(&received_raw_data_2));
|
||||||
|
|
||||||
|
EXPECT_EQ(raw_data_1, received_raw_data_1);
|
||||||
|
EXPECT_EQ(request.message(), received_request.message());
|
||||||
|
EXPECT_EQ(raw_data_2, received_raw_data_2);
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, ReceiveMessageInsteadOfRaw) {
|
||||||
|
const Buffer raw_data = {'r', 'a', 'w'};
|
||||||
|
EXPECT_OK(message_pump_.SendRawData(raw_data.data(), raw_data.size()));
|
||||||
|
|
||||||
|
TestRequest received_request;
|
||||||
|
EXPECT_NOT_OK(
|
||||||
|
message_pump_.ReceiveMessage(PacketType::kTest, &received_request));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, ReceiveRawInsteadOfMessage) {
|
||||||
|
TestRequest request;
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
|
||||||
|
Buffer received_raw_data;
|
||||||
|
EXPECT_NOT_OK(message_pump_.ReceiveRawData(&received_raw_data));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, ReceiveMessageWrongType) {
|
||||||
|
TestRequest request;
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
|
||||||
|
ShutdownRequest received_request;
|
||||||
|
EXPECT_NOT_OK(
|
||||||
|
message_pump_.ReceiveMessage(PacketType::kShutdown, &received_request));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, MessageMaxSizeExceeded) {
|
||||||
|
TestRequest request;
|
||||||
|
size_t max_size = message_pump_.GetMaxPacketSizeForTesting();
|
||||||
|
request.set_message(std::string(max_size + 1, 'x'));
|
||||||
|
EXPECT_NOT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, FlushOutgoingQueue) {
|
||||||
|
TestRequest request;
|
||||||
|
request.set_message(std::string(1024 * 4, 'x'));
|
||||||
|
constexpr size_t kNumMessages = 1000;
|
||||||
|
|
||||||
|
// Note: Must stay below max queue size or else SendMessage starts blocking.
|
||||||
|
ASSERT_LT((request.message().size() + 4) * kNumMessages,
|
||||||
|
message_pump_.GetMaxInOutBufferSizeForTesting());
|
||||||
|
|
||||||
|
// Queue up a bunch of large messages.
|
||||||
|
fake_socket_.SuspendSending(true);
|
||||||
|
for (size_t n = 0; n < kNumMessages; ++n) {
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, request));
|
||||||
|
}
|
||||||
|
EXPECT_GT(message_pump_.GetNumOutgoingPackagesForTesting(), 0);
|
||||||
|
|
||||||
|
// Flush the queue.
|
||||||
|
fake_socket_.SuspendSending(false);
|
||||||
|
message_pump_.FlushOutgoingQueue();
|
||||||
|
|
||||||
|
// Check if the queue is empty.
|
||||||
|
EXPECT_EQ(message_pump_.GetNumOutgoingPackagesForTesting(), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
class FakeCompressedInputReader : public MessagePump::InputReader {
|
||||||
|
public:
|
||||||
|
explicit FakeCompressedInputReader(Socket* socket) : socket_(socket) {}
|
||||||
|
|
||||||
|
// Doesn't actually do compression, just replaces the word "compressed" with
|
||||||
|
// "COMPRESSED" as a sign that this handler was executed. In the real rsync
|
||||||
|
// algorithm, this is used to decompress data.
|
||||||
|
absl::Status Read(void* out_buffer, size_t out_size, size_t* bytes_read,
|
||||||
|
bool* eof) override {
|
||||||
|
absl::Status status = socket_->Receive(
|
||||||
|
out_buffer, out_size, /*allow_partial_read=*/false, bytes_read);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "socket_->Receive() failed");
|
||||||
|
}
|
||||||
|
assert(*bytes_read == out_size);
|
||||||
|
char* char_buffer = static_cast<char*>(out_buffer);
|
||||||
|
char* pos = strstr(char_buffer, "compressed");
|
||||||
|
if (pos) {
|
||||||
|
memcpy(pos, "COMPRESSED", strlen("COMPRESSED"));
|
||||||
|
}
|
||||||
|
*eof = strstr(char_buffer, "set_eof") != nullptr;
|
||||||
|
return absl::OkStatus();
|
||||||
|
};
|
||||||
|
|
||||||
|
private:
|
||||||
|
Socket* socket_;
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, RedirectInput) {
|
||||||
|
fake_compressed_input_reader_ =
|
||||||
|
std::make_unique<FakeCompressedInputReader>(&fake_socket_);
|
||||||
|
|
||||||
|
TestRequest test_request;
|
||||||
|
ToggleCompressionRequest compression_request;
|
||||||
|
|
||||||
|
test_request.set_message("uncompressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
// Once this message is received, |fake_compressed_input_reader_| is set by
|
||||||
|
// ThreadPackageReceived().
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kToggleCompression,
|
||||||
|
compression_request));
|
||||||
|
|
||||||
|
// Send a "compressed" message (should be converted to upper case).
|
||||||
|
test_request.set_message("compressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
// Trigger reset of the input reader.
|
||||||
|
test_request.set_message("set_eof");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
// The next message should be "uncompressed" (lower case) again.
|
||||||
|
test_request.set_message("uncompressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "uncompressed");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kToggleCompression,
|
||||||
|
&compression_request));
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "COMPRESSED");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "set_eof");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "uncompressed");
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST_F(MessagePumpTest, RedirectOutput) {
|
||||||
|
// Doesn't actually do compression, just replaces the word "compressed" with
|
||||||
|
// "COMPRESSED" as a sign that this handler was executed. In the real rsync
|
||||||
|
// algorithm, this handler would pipe the data through zstd to compress it.
|
||||||
|
auto fake_compressed_output_handler = [this](const void* data, size_t size) {
|
||||||
|
std::string char_buffer(static_cast<const char*>(data), size);
|
||||||
|
std::string::size_type pos = char_buffer.find("compressed");
|
||||||
|
if (pos != std::string::npos) {
|
||||||
|
char_buffer.replace(pos, strlen("compressed"), "COMPRESSED");
|
||||||
|
}
|
||||||
|
return fake_socket_.Send(char_buffer.data(), size);
|
||||||
|
};
|
||||||
|
|
||||||
|
TestRequest test_request;
|
||||||
|
ToggleCompressionRequest compression_request;
|
||||||
|
|
||||||
|
test_request.set_message("uncompressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
// Set output handler.
|
||||||
|
message_pump_.RedirectOutput(fake_compressed_output_handler);
|
||||||
|
|
||||||
|
// Send a "compressed" message (should be converted to upper case).
|
||||||
|
test_request.set_message("compressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
// Clear output handler again.
|
||||||
|
message_pump_.RedirectOutput(MessagePump::OutputHandler());
|
||||||
|
|
||||||
|
// The next message should be "uncompressed" (lower case) again.
|
||||||
|
test_request.set_message("uncompressed");
|
||||||
|
EXPECT_OK(message_pump_.SendMessage(PacketType::kTest, test_request));
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "uncompressed");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "COMPRESSED");
|
||||||
|
|
||||||
|
EXPECT_OK(message_pump_.ReceiveMessage(PacketType::kTest, &test_request));
|
||||||
|
EXPECT_EQ(test_request.message(), "uncompressed");
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
} // namespace cdc_ft
|
||||||
63
cdc_rsync/base/server_exit_code.h
Normal file
63
cdc_rsync/base/server_exit_code.h
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_BASE_SERVER_EXIT_CODE_H_
|
||||||
|
#define CDC_RSYNC_BASE_SERVER_EXIT_CODE_H_
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
// Since the client cannot distinguish between stderr and stdout (ssh.exe sends
|
||||||
|
// both to stdout), the server marks the beginning and ending of error messages
|
||||||
|
// with this marker char. The client interprets everything in between as an
|
||||||
|
// error message.
|
||||||
|
constexpr char kServerErrorMarker = 0x1e;
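//
// Illustrative example: the combined ssh output
//   "uploading...\x1epermission denied\x1edone"
// is interpreted as stdout "uploading...", error message "permission denied",
// and stdout "done".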
|
||||||
|
|
||||||
|
enum ServerExitCode {
|
||||||
|
// Pick a range of exit codes that does not overlap with unrelated exit codes
|
||||||
|
// like bash exit codes.
|
||||||
|
// - 126: error from bash when binary can't be started (permission denied).
|
||||||
|
// - 127: error from bash when binary isn't found.
|
||||||
|
// - 255: ssh.exe error code.
|
||||||
|
// Note that codes must be <= 255.
|
||||||
|
|
||||||
|
// KEEP UPDATED!
|
||||||
|
kServerExitCodeMin = 50,
|
||||||
|
|
||||||
|
// Generic error on startup, before out-of-date check, e.g. bad args.
|
||||||
|
kServerExitCodeGenericStartup = 50,
|
||||||
|
|
||||||
|
// A gamelet component is outdated and needs to be re-uploaded.
|
||||||
|
kServerExitCodeOutOfDate = 51,
|
||||||
|
|
||||||
|
//
|
||||||
|
// All other exit codes must be strictly bigger than kServerExitCodeOutOfDate.
|
||||||
|
// They are guaranteed to be past the out-of-date check.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Unspecified error.
|
||||||
|
kServerExitCodeGeneric = 52,
|
||||||
|
|
||||||
|
// Binding to the forward port failed, probably because there's another
|
||||||
|
// instance of cdc_rsync running.
|
||||||
|
kServerExitCodeAddressInUse = 53,
|
||||||
|
|
||||||
|
// KEEP UPDATED!
|
||||||
|
kServerExitCodeMax = 53,
|
||||||
|
};
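// Hypothetical helper (not part of this header) for classifying an ssh exit
// code as server-owned, based on the range defined above:
//
//   inline bool IsServerExitCode(int code) {
//     return code >= kServerExitCodeMin && code <= kServerExitCodeMax;
//   }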
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_BASE_SERVER_EXIT_CODE_H_
|
||||||
45
cdc_rsync/base/socket.h
Normal file
45
cdc_rsync/base/socket.h
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_BASE_SOCKET_H_
|
||||||
|
#define CDC_RSYNC_BASE_SOCKET_H_
|
||||||
|
|
||||||
|
#include "absl/status/status.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
class Socket {
|
||||||
|
public:
|
||||||
|
Socket() = default;
|
||||||
|
virtual ~Socket() = default;
|
||||||
|
|
||||||
|
// Sends data to the socket.
|
||||||
|
virtual absl::Status Send(const void* buffer, size_t size) = 0;
|
||||||
|
|
||||||
|
// Receives data from the socket. Blocks until data is available or the
|
||||||
|
// sending end of the socket gets shut down by the sender.
|
||||||
|
// If |allow_partial_read| is false, blocks until |size| bytes are available.
|
||||||
|
// If |allow_partial_read| is true, may return with success if less than
|
||||||
|
// |size| (but more than 0) bytes were received.
|
||||||
|
// The number of bytes written to |buffer| is returned in |bytes_received|.
|
||||||
|
virtual absl::Status Receive(void* buffer, size_t size,
|
||||||
|
bool allow_partial_read,
|
||||||
|
size_t* bytes_received) = 0;
|
||||||
|
};
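// Illustrative sketch (assumes |socket| points to a concrete implementation):
//
//   char buf[16];
//   size_t received = 0;
//   // Blocks until all 16 bytes arrived (or the sending end was shut down).
//   absl::Status s1 =
//       socket->Receive(buf, sizeof(buf), /*allow_partial_read=*/false, &received);
//   // May return successfully with anywhere from 1 to 16 bytes.
//   absl::Status s2 =
//       socket->Receive(buf, sizeof(buf), /*allow_partial_read=*/true, &received);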
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_BASE_SOCKET_H_
|
||||||
1
cdc_rsync/base/testdata/cdc_interface/new_file.txt
vendored
Normal file
1
cdc_rsync/base/testdata/cdc_interface/new_file.txt
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Data for rsync testing. This is the new, modified file on the workstation.
|
||||||
1
cdc_rsync/base/testdata/cdc_interface/old_file.txt
vendored
Normal file
1
cdc_rsync/base/testdata/cdc_interface/old_file.txt
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
Data for rsync testing. This is the old version on the gamelet.
|
||||||
0
cdc_rsync/base/testdata/root.txt
vendored
Normal file
0
cdc_rsync/base/testdata/root.txt
vendored
Normal file
125
cdc_rsync/cdc_rsync.cc
Normal file
125
cdc_rsync/cdc_rsync.cc
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/cdc_rsync.h"
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "cdc_rsync/cdc_rsync_client.h"
|
||||||
|
#include "cdc_rsync/error_messages.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/path_filter.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
ReturnCode TagToMessage(Tag tag, const Options* options, std::string* msg) {
|
||||||
|
msg->clear();
|
||||||
|
switch (tag) {
|
||||||
|
case Tag::kSocketEof:
|
||||||
|
*msg = kMsgConnectionLost;
|
||||||
|
return ReturnCode::kConnectionLost;
|
||||||
|
|
||||||
|
case Tag::kAddressInUse:
|
||||||
|
*msg = kMsgAddressInUse;
|
||||||
|
return ReturnCode::kAddressInUse;
|
||||||
|
|
||||||
|
case Tag::kDeployServer:
|
||||||
|
*msg = kMsgDeployFailed;
|
||||||
|
return ReturnCode::kDeployFailed;
|
||||||
|
|
||||||
|
case Tag::kInstancePickerNotAvailableInQuietMode:
|
||||||
|
*msg = kMsgInstancePickerNotAvailableInQuietMode;
|
||||||
|
return ReturnCode::kInstancePickerNotAvailableInQuietMode;
|
||||||
|
|
||||||
|
case Tag::kConnectionTimeout:
|
||||||
|
*msg =
|
||||||
|
absl::StrFormat(kMsgFmtConnectionTimeout, options->ip, options->port);
|
||||||
|
return ReturnCode::kConnectionTimeout;
|
||||||
|
|
||||||
|
case Tag::kCount:
|
||||||
|
return ReturnCode::kGenericError;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should not happen (TM). Will fall back to status message in this case.
|
||||||
|
return ReturnCode::kGenericError;
|
||||||
|
}
|
||||||
|
|
||||||
|
PathFilter::Rule::Type ToInternalType(FilterRule::Type type) {
|
||||||
|
switch (type) {
|
||||||
|
case FilterRule::Type::kInclude:
|
||||||
|
return PathFilter::Rule::Type::kInclude;
|
||||||
|
case FilterRule::Type::kExclude:
|
||||||
|
return PathFilter::Rule::Type::kExclude;
|
||||||
|
}
|
||||||
|
assert(false);
|
||||||
|
return PathFilter::Rule::Type::kInclude;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
ReturnCode Sync(const Options* options, const FilterRule* filter_rules,
|
||||||
|
size_t num_filter_rules, const char* sources_dir,
|
||||||
|
const char* const* sources, size_t num_sources,
|
||||||
|
const char* destination, const char** error_message) {
|
||||||
|
LogLevel log_level = Log::VerbosityToLogLevel(options->verbosity);
|
||||||
|
Log::Initialize(std::make_unique<ConsoleLog>(log_level));
|
||||||
|
|
||||||
|
PathFilter path_filter;
|
||||||
|
for (size_t n = 0; n < num_filter_rules; ++n) {
|
||||||
|
path_filter.AddRule(ToInternalType(filter_rules[n].type),
|
||||||
|
filter_rules[n].pattern);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<std::string> sources_vec;
|
||||||
|
for (size_t n = 0; n < num_sources; ++n) {
|
||||||
|
sources_vec.push_back(sources[n]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run rsync.
|
||||||
|
GgpRsyncClient client(*options, std::move(path_filter), sources_dir,
|
||||||
|
std::move(sources_vec), destination);
|
||||||
|
absl::Status status = client.Run();
|
||||||
|
|
||||||
|
if (status.ok()) {
|
||||||
|
*error_message = nullptr;
|
||||||
|
return ReturnCode::kOk;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string msg;
|
||||||
|
ReturnCode code = ReturnCode::kGenericError;
|
||||||
|
absl::optional<Tag> tag = GetTag(status);
|
||||||
|
if (tag.has_value()) {
|
||||||
|
code = TagToMessage(tag.value(), options, &msg);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to status message.
|
||||||
|
if (msg.empty()) {
|
||||||
|
msg = std::string(status.message());
|
||||||
|
} else if (options->verbosity >= 2) {
|
||||||
|
// In verbose mode, log the status as well, so nothing gets lost.
|
||||||
|
LOG_ERROR("%s", status.ToString().c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store error message in static buffer (don't use std::string through DLL
|
||||||
|
// boundary!).
|
||||||
|
static char buf[1024] = {0};
|
||||||
|
strncpy_s(buf, msg.c_str(), _TRUNCATE);
|
||||||
|
*error_message = buf;
|
||||||
|
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
107
cdc_rsync/cdc_rsync.h
Normal file
107
cdc_rsync/cdc_rsync.h
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2022 Google LLC
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef CDC_RSYNC_CDC_RSYNC_H_
|
||||||
|
#define CDC_RSYNC_CDC_RSYNC_H_
|
||||||
|
|
||||||
|
#ifdef COMPILING_DLL
|
||||||
|
#define CDC_RSYNC_API __declspec(dllexport)
|
||||||
|
#else
|
||||||
|
#define CDC_RSYNC_API __declspec(dllimport)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
struct Options {
|
||||||
|
const char* ip = nullptr;
|
||||||
|
int port = 0;
|
||||||
|
bool delete_ = false;
|
||||||
|
bool recursive = false;
|
||||||
|
int verbosity = 0;
|
||||||
|
bool quiet = false;
|
||||||
|
bool whole_file = false;
|
||||||
|
bool relative = false;
|
||||||
|
bool compress = false;
|
||||||
|
bool checksum = false;
|
||||||
|
bool dry_run = false;
|
||||||
|
bool existing = false;
|
||||||
|
bool json = false;
|
||||||
|
const char* copy_dest = nullptr;
|
||||||
|
int compress_level = 6;
|
||||||
|
int connection_timeout_sec = 10;
|
||||||
|
|
||||||
|
// Compression level 0 is invalid.
|
||||||
|
static constexpr int kMinCompressLevel = -5;
|
||||||
|
static constexpr int kMaxCompressLevel = 22;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Rule for including/excluding files.
|
||||||
|
struct FilterRule {
|
||||||
|
enum class Type {
|
||||||
|
kInclude,
|
||||||
|
kExclude,
|
||||||
|
};
|
||||||
|
|
||||||
|
Type type;
|
||||||
|
const char* pattern;
|
||||||
|
|
||||||
|
FilterRule(Type type, const char* pattern) : type(type), pattern(pattern) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
enum class ReturnCode {
|
||||||
|
// No error. Will match the tool's exit code, so OK must be 0.
|
||||||
|
kOk = 0,
|
||||||
|
|
||||||
|
// Generic error.
|
||||||
|
kGenericError = 1,
|
||||||
|
|
||||||
|
// Server connection timed out.
|
||||||
|
kConnectionTimeout = 2,
|
||||||
|
|
||||||
|
// Connection to the server was shut down unexpectedly.
|
||||||
|
kConnectionLost = 3,
|
||||||
|
|
||||||
|
// Binding to the forward port failed, probably because there's another
|
||||||
|
// instance of cdc_rsync running.
|
||||||
|
kAddressInUse = 4,
|
||||||
|
|
||||||
|
// Server deployment failed. This should be rare; it means that the server
|
||||||
|
// components were successfully copied, but the up-to-date check still fails.
|
||||||
|
kDeployFailed = 5,
|
||||||
|
|
||||||
|
// Gamelet selection asks for user input, but we are in quiet mode.
|
||||||
|
kInstancePickerNotAvailableInQuietMode = 6,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Calling Sync() a second time overwrites the data in |error_message|.
|
||||||
|
CDC_RSYNC_API ReturnCode Sync(const Options* options,
|
||||||
|
const FilterRule* filter_rules,
|
||||||
|
size_t filter_num_rules, const char* sources_dir,
|
||||||
|
const char* const* sources, size_t num_sources,
|
||||||
|
const char* destination,
|
||||||
|
const char** error_message);
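//
// Minimal calling sketch (all values are illustrative, not defaults):
//
//   cdc_ft::Options options;
//   options.ip = "192.0.2.1";
//   options.port = 44450;
//   const char* sources[] = {"C:\\project\\build"};
//   const char* error_message = nullptr;
//   cdc_ft::ReturnCode code = cdc_ft::Sync(
//       &options, /*filter_rules=*/nullptr, /*filter_num_rules=*/0,
//       /*sources_dir=*/"", sources, /*num_sources=*/1, "~/project",
//       &error_message);
//   if (code != cdc_ft::ReturnCode::kOk && error_message) { /* report it */ }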
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
} // extern "C"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
} // namespace cdc_ft
|
||||||
|
|
||||||
|
#endif // CDC_RSYNC_CDC_RSYNC_H_
|
||||||
789
cdc_rsync/cdc_rsync_client.cc
Normal file
789
cdc_rsync/cdc_rsync_client.cc
Normal file
@@ -0,0 +1,789 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "cdc_rsync/cdc_rsync_client.h"
|
||||||
|
|
||||||
|
#include "absl/strings/str_format.h"
|
||||||
|
#include "absl/strings/str_split.h"
|
||||||
|
#include "cdc_rsync/base/cdc_interface.h"
|
||||||
|
#include "cdc_rsync/base/message_pump.h"
|
||||||
|
#include "cdc_rsync/base/server_exit_code.h"
|
||||||
|
#include "cdc_rsync/client_file_info.h"
|
||||||
|
#include "cdc_rsync/client_socket.h"
|
||||||
|
#include "cdc_rsync/file_finder_and_sender.h"
|
||||||
|
#include "cdc_rsync/parallel_file_opener.h"
|
||||||
|
#include "cdc_rsync/progress_tracker.h"
|
||||||
|
#include "cdc_rsync/protos/messages.pb.h"
|
||||||
|
#include "cdc_rsync/zstd_stream.h"
|
||||||
|
#include "common/gamelet_component.h"
|
||||||
|
#include "common/log.h"
|
||||||
|
#include "common/path.h"
|
||||||
|
#include "common/process.h"
|
||||||
|
#include "common/status.h"
|
||||||
|
#include "common/status_macros.h"
|
||||||
|
#include "common/stopwatch.h"
|
||||||
|
#include "common/util.h"
|
||||||
|
|
||||||
|
namespace cdc_ft {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Bash exit code if binary could not be run, e.g. permission denied.
|
||||||
|
constexpr int kExitCodeCouldNotExecute = 126;
|
||||||
|
|
||||||
|
// Bash exit code if binary was not found.
|
||||||
|
constexpr int kExitCodeNotFound = 127;
|
||||||
|
|
||||||
|
constexpr int kForwardPortFirst = 44450;
|
||||||
|
constexpr int kForwardPortLast = 44459;
|
||||||
|
constexpr char kGgpServerFilename[] = "cdc_rsync_server";
|
||||||
|
constexpr char kRemoteToolsBinDir[] = "/opt/developer/tools/bin/";
|
||||||
|
|
||||||
|
SetOptionsRequest::FilterRule::Type ToProtoType(PathFilter::Rule::Type type) {
|
||||||
|
switch (type) {
|
||||||
|
case PathFilter::Rule::Type::kInclude:
|
||||||
|
return SetOptionsRequest::FilterRule::TYPE_INCLUDE;
|
||||||
|
case PathFilter::Rule::Type::kExclude:
|
||||||
|
return SetOptionsRequest::FilterRule::TYPE_EXCLUDE;
|
||||||
|
}
|
||||||
|
assert(false);
|
||||||
|
return SetOptionsRequest::FilterRule::TYPE_INCLUDE;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Translates a server process exit code and stderr into a status.
|
||||||
|
absl::Status GetServerExitStatus(int exit_code, const std::string& error_msg) {
|
||||||
|
auto se_code = static_cast<ServerExitCode>(exit_code);
|
||||||
|
switch (se_code) {
|
||||||
|
case kServerExitCodeGenericStartup:
|
||||||
|
if (!error_msg.empty()) {
|
||||||
|
return MakeStatus("Server returned error during startup: %s",
|
||||||
|
error_msg);
|
||||||
|
}
|
||||||
|
return MakeStatus(
|
||||||
|
"Server exited with an unspecified error during startup");
|
||||||
|
|
||||||
|
case kServerExitCodeOutOfDate:
|
||||||
|
return MakeStatus(
|
||||||
|
"Server exited since instance components are out of date");
|
||||||
|
|
||||||
|
case kServerExitCodeGeneric:
|
||||||
|
if (!error_msg.empty()) {
|
||||||
|
return MakeStatus("Server returned error: %s", error_msg);
|
||||||
|
}
|
||||||
|
return MakeStatus("Server exited with an unspecified error");
|
||||||
|
|
||||||
|
case kServerExitCodeAddressInUse:
|
||||||
|
return SetTag(MakeStatus("Server failed to connect"), Tag::kAddressInUse);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Could potentially happen if the server exits for another reason,
|
||||||
|
// e.g. some ssh.exe error (remember that the server process is actually
|
||||||
|
// an ssh process).
|
||||||
|
return MakeStatus("Server exited with code %i", exit_code);
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
GgpRsyncClient::GgpRsyncClient(const Options& options, PathFilter path_filter,
|
||||||
|
std::string sources_dir,
|
||||||
|
std::vector<std::string> sources,
|
||||||
|
std::string destination)
|
||||||
|
: options_(options),
|
||||||
|
path_filter_(std::move(path_filter)),
|
||||||
|
sources_dir_(std::move(sources_dir)),
|
||||||
|
sources_(std::move(sources)),
|
||||||
|
destination_(std::move(destination)),
|
||||||
|
remote_util_(options.verbosity, options.quiet, &process_factory_,
|
||||||
|
/*forward_output_to_log=*/false),
|
||||||
|
port_manager_("cdc_rsync_ports_f77bcdfe-368c-4c45-9f01-230c5e7e2132",
|
||||||
|
kForwardPortFirst, kForwardPortLast, &process_factory_,
|
||||||
|
&remote_util_),
|
||||||
|
printer_(options.quiet, Util::IsTTY() && !options.json),
|
||||||
|
progress_(&printer_, options.verbosity, options.json) {}
|
||||||
|
|
||||||
|
GgpRsyncClient::~GgpRsyncClient() {
|
||||||
|
message_pump_.StopMessagePump();
|
||||||
|
socket_.Disconnect();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::Run() {
|
||||||
|
absl::Status status = remote_util_.GetInitStatus();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to initialize critical components");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize |remote_util_|.
|
||||||
|
remote_util_.SetIpAndPort(options_.ip, options_.port);
|
||||||
|
|
||||||
|
// Start the server process.
|
||||||
|
status = StartServer();
|
||||||
|
if (HasTag(status, Tag::kDeployServer)) {
|
||||||
|
// Gamelet components are not deployed or outdated. Deploy and retry.
|
||||||
|
status = DeployServer();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to deploy server");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = StartServer();
|
||||||
|
}
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to start server");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag::kSocketEof most likely means that the server had an error and exited. In
|
||||||
|
// that case, try to shut it down properly to get more info from the error
|
||||||
|
// message.
|
||||||
|
status = Sync();
|
||||||
|
if (!status.ok() && !HasTag(status, Tag::kSocketEof)) {
|
||||||
|
return WrapStatus(status, "Failed to sync files");
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status stop_status = StopServer();
|
||||||
|
if (!stop_status.ok()) {
|
||||||
|
return WrapStatus(stop_status, "Failed to stop server");
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the server doesn't send any error information, return the sync status.
|
||||||
|
if (server_error_.empty() && HasTag(status, Tag::kSocketEof)) {
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check exit code and stderr.
|
||||||
|
if (server_exit_code_ != 0) {
|
||||||
|
status = GetServerExitStatus(server_exit_code_, server_error_);
|
||||||
|
}
|
||||||
|
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::StartServer() {
|
||||||
|
assert(!server_process_);
|
||||||
|
|
||||||
|
// Components are expected to reside in the same dir as the executable.
|
||||||
|
std::string component_dir;
|
||||||
|
absl::Status status = path::GetExeDir(&component_dir);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to get the executable directory");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<GameletComponent> components;
|
||||||
|
status = GameletComponent::Get(
|
||||||
|
{path::Join(component_dir, kGgpServerFilename)}, &components);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return MakeStatus(
|
||||||
|
"Required instance component not found. Make sure the file "
|
||||||
|
"cdc_rsync_server resides in the same folder as cdc_rsync.exe.");
|
||||||
|
}
|
||||||
|
std::string component_args = GameletComponent::ToCommandLineArgs(components);
|
||||||
|
|
||||||
|
// Find available local and remote ports for port forwarding.
|
||||||
|
absl::StatusOr<int> port_res =
|
||||||
|
port_manager_.ReservePort(options_.connection_timeout_sec);
|
||||||
|
constexpr char kErrorMsg[] = "Failed to find available port";
|
||||||
|
if (absl::IsDeadlineExceeded(port_res.status())) {
|
||||||
|
// Server didn't respond in time.
|
||||||
|
return SetTag(WrapStatus(port_res.status(), kErrorMsg),
|
||||||
|
Tag::kConnectionTimeout);
|
||||||
|
}
|
||||||
|
if (absl::IsResourceExhausted(port_res.status()))
|
||||||
|
return SetTag(WrapStatus(port_res.status(), kErrorMsg), Tag::kAddressInUse);
|
||||||
|
if (!port_res.ok())
|
||||||
|
return WrapStatus(port_res.status(), "Failed to find available port");
|
||||||
|
int port = *port_res;
|
||||||
|
|
||||||
|
std::string remote_server_path =
|
||||||
|
std::string(kRemoteToolsBinDir) + kGgpServerFilename;
|
||||||
|
// Test existence manually to prevent the misleading bash output message
|
||||||
|
// "bash: .../cdc_rsync_server: No such file or directory".
|
||||||
|
std::string remote_command = absl::StrFormat(
|
||||||
|
"if [ ! -f %s ]; then exit %i; fi; %s %i %s", remote_server_path,
|
||||||
|
kExitCodeNotFound, remote_server_path, port, component_args);
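// Illustrative expansion of |remote_command| (port and args are made up):
//   if [ ! -f /opt/developer/tools/bin/cdc_rsync_server ]; then exit 127; fi;
//   /opt/developer/tools/bin/cdc_rsync_server 44450 <component args>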
|
||||||
|
ProcessStartInfo start_info =
|
||||||
|
remote_util_.BuildProcessStartInfoForSshPortForwardAndCommand(
|
||||||
|
port, port, false, remote_command);
|
||||||
|
start_info.name = "cdc_rsync_server";
|
||||||
|
|
||||||
|
// Capture stdout, but also forward it to our stdout for debugging purposes.
|
||||||
|
start_info.stdout_handler = [this](const char* data, size_t /*data_size*/) {
|
||||||
|
return HandleServerOutput(data);
|
||||||
|
};
|
||||||
|
|
||||||
|
std::unique_ptr<Process> process = process_factory_.Create(start_info);
|
||||||
|
status = process->Start();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to start cdc_rsync_server process");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until the server process is listening.
|
||||||
|
auto detect_listening = [is_listening = &is_server_listening_]() -> bool {
|
||||||
|
return *is_listening;
|
||||||
|
};
|
||||||
|
status = process->RunUntil(detect_listening);
|
||||||
|
if (!status.ok()) {
|
||||||
|
// Some internal process error. Note that this does NOT mean that
|
||||||
|
// cdc_rsync_server does not exist. In that case, the ssh process exits with
|
||||||
|
// code 127.
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (process->HasExited()) {
|
||||||
|
// Don't re-deploy for code > kServerExitCodeOutOfDate, which means that the
|
||||||
|
// out-of-date check already passed on the server.
|
||||||
|
server_exit_code_ = process->ExitCode();
|
||||||
|
if (server_exit_code_ > kServerExitCodeOutOfDate &&
|
||||||
|
server_exit_code_ <= kServerExitCodeMax) {
|
||||||
|
return GetServerExitStatus(server_exit_code_, server_error_);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server exited before it started listening, most likely because of
|
||||||
|
// outdated components (code kServerExitCodeOutOfDate) or because the server
|
||||||
|
// wasn't deployed at all yet (code kExitCodeNotFound). Instruct caller
|
||||||
|
// to re-deploy.
|
||||||
|
return SetTag(MakeStatus("Redeploy server"), Tag::kDeployServer);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(is_server_listening_);
|
||||||
|
status = socket_.Connect(port);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to initialize connection");
|
||||||
|
}
|
||||||
|
|
||||||
|
server_process_ = std::move(process);
|
||||||
|
message_pump_.StartMessagePump();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::StopServer() {
|
||||||
|
assert(server_process_);
|
||||||
|
|
||||||
|
// Close socket.
|
||||||
|
absl::Status status = socket_.ShutdownSendingEnd();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to shut down socket sending end");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = server_process_->RunUntilExit();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to stop cdc_rsync_server process");
|
||||||
|
}
|
||||||
|
|
||||||
|
server_exit_code_ = server_process_->ExitCode();
|
||||||
|
server_process_.reset();
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::HandleServerOutput(const char* data) {
|
||||||
|
// Note: This is called from a background thread!
|
||||||
|
|
||||||
|
// Handle server error messages. Unfortunately, if the server prints to
|
||||||
|
// stderr, the ssh process does not write it to its stderr, but to stdout, so
|
||||||
|
// we have to jump through hoops to read the error. We use a marker char for
|
||||||
|
// the start of the error message:
|
||||||
|
// This is stdout \x1e This is stderr \x1e This is stdout again
|
||||||
|
std::string stdout_data_storage;
|
||||||
|
const char* stdout_data = data;
|
||||||
|
if (is_server_error_ || strchr(data, kServerErrorMarker)) {
|
||||||
|
// Only run this expensive code if necessary.
|
||||||
|
std::vector<std::string> parts =
|
||||||
|
absl::StrSplit(data, absl::ByChar(kServerErrorMarker));
|
||||||
|
for (size_t n = 0; n < parts.size(); ++n) {
|
||||||
|
if (is_server_error_) {
|
||||||
|
server_error_.append(parts[n]);
|
||||||
|
} else {
|
||||||
|
stdout_data_storage.append(parts[n]);
|
||||||
|
}
|
||||||
|
if (n + 1 < parts.size()) {
|
||||||
|
is_server_error_ = !is_server_error_;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stdout_data = stdout_data_storage.c_str();
|
||||||
|
}
|
||||||
|
|
||||||
|
printer_.Print(stdout_data, false, Util::GetConsoleWidth());
|
||||||
|
if (!is_server_listening_) {
|
||||||
|
server_output_.append(stdout_data);
|
||||||
|
is_server_listening_ =
|
||||||
|
server_output_.find("Server is listening") != std::string::npos;
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::Sync() {
|
||||||
|
absl::Status status = SendOptions();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send options to server");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = FindAndSendAllSourceFiles();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to find and send all source files");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = ReceiveFileStats();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive file stats");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (options_.delete_) {
|
||||||
|
status = ReceiveDeletedFiles();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive paths of deleted files");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
status = ReceiveFileIndices("missing", &missing_file_indices_);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive missing file indices");
|
||||||
|
}
|
||||||
|
status = SendMissingFiles();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send missing files");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = ReceiveFileIndices("changed", &changed_file_indices_);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive changed file indices");
|
||||||
|
}
|
||||||
|
|
||||||
|
status = ReceiveSignaturesAndSendDelta();
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive signatures and send delta");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sync point for shutdown (waits for the server to finish).
|
||||||
|
ShutdownRequest shutdown_request;
|
||||||
|
status = message_pump_.SendMessage(PacketType::kShutdown, shutdown_request);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to send shutdown request");
|
||||||
|
}
|
||||||
|
|
||||||
|
ShutdownResponse response;
|
||||||
|
status = message_pump_.ReceiveMessage(PacketType::kShutdown, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive shutdown response");
|
||||||
|
}
|
||||||
|
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::DeployServer() {
|
||||||
|
assert(!server_process_);
|
||||||
|
|
||||||
|
std::string exe_dir;
|
||||||
|
absl::Status status = path::GetExeDir(&exe_dir);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to get exe directory");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string deploy_msg;
|
||||||
|
if (server_exit_code_ == kExitCodeNotFound) {
|
||||||
|
deploy_msg = "Server not deployed. Deploying...";
|
||||||
|
} else if (server_exit_code_ == kExitCodeCouldNotExecute) {
|
||||||
|
deploy_msg = "Server failed to start. Redeploying...";
|
||||||
|
} else if (server_exit_code_ == kServerExitCodeOutOfDate) {
|
||||||
|
deploy_msg = "Server outdated. Redeploying...";
|
||||||
|
} else {
|
||||||
|
deploy_msg = "Deploying server...";
|
||||||
|
}
|
||||||
|
printer_.Print(deploy_msg, true, Util::GetConsoleWidth());
|
||||||
|
|
||||||
|
// scp cdc_rsync_server to a temp location on the gamelet.
|
||||||
|
std::string remoteServerTmpPath =
|
||||||
|
absl::StrFormat("%s%s.%s", kRemoteToolsBinDir, kGgpServerFilename,
|
||||||
|
Util::GenerateUniqueId());
|
||||||
|
std::string localServerPath = path::Join(exe_dir, kGgpServerFilename);
|
||||||
|
status = remote_util_.Scp({localServerPath}, remoteServerTmpPath,
|
||||||
|
/*compress=*/true);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to copy cdc_rsync_server to instance");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make cdc_rsync_server executable.
|
||||||
|
status = remote_util_.Chmod("a+x", remoteServerTmpPath);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status,
|
||||||
|
"Failed to set executable flag on cdc_rsync_server");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make old file writable. Mv might fail to overwrite it, e.g. if someone made
|
||||||
|
// it read-only.
|
||||||
|
std::string remoteServerPath =
|
||||||
|
std::string(kRemoteToolsBinDir) + kGgpServerFilename;
|
||||||
|
status = remote_util_.Chmod("u+w", remoteServerPath, /*quiet=*/true);
|
||||||
|
if (!status.ok()) {
|
||||||
|
LOG_DEBUG("chmod u+w %s failed (expected if file does not exist): %s",
|
||||||
|
remoteServerPath, status.ToString());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replace old file by new file.
|
||||||
|
status = remote_util_.Mv(remoteServerTmpPath, remoteServerPath);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to replace '%s' by '%s'",
|
||||||
|
remoteServerPath, remoteServerTmpPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::SendOptions() {
|
||||||
|
LOG_INFO("Sending options");
|
||||||
|
|
||||||
|
SetOptionsRequest request;
|
||||||
|
request.set_destination(destination_);
|
||||||
|
request.set_delete_(options_.delete_);
|
||||||
|
request.set_recursive(options_.recursive);
|
||||||
|
request.set_verbosity(options_.verbosity);
|
||||||
|
request.set_whole_file(options_.whole_file);
|
||||||
|
request.set_compress(options_.compress);
|
||||||
|
request.set_relative(options_.relative);
|
||||||
|
|
||||||
|
for (const PathFilter::Rule& rule : path_filter_.GetRules()) {
|
||||||
|
SetOptionsRequest::FilterRule* filter_rule = request.add_filter_rules();
|
||||||
|
filter_rule->set_type(ToProtoType(rule.type));
|
||||||
|
filter_rule->set_pattern(rule.pattern);
|
||||||
|
}
|
||||||
|
|
||||||
|
request.set_checksum(options_.checksum);
|
||||||
|
request.set_dry_run(options_.dry_run);
|
||||||
|
request.set_existing(options_.existing);
|
||||||
|
if (options_.copy_dest) {
|
||||||
|
request.set_copy_dest(options_.copy_dest);
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_.SendMessage(PacketType::kSetOptions, request);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "SendDestination() failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::FindAndSendAllSourceFiles() {
|
||||||
|
LOG_INFO("Finding and sending all sources files");
|
||||||
|
|
||||||
|
Stopwatch stopwatch;
|
||||||
|
|
||||||
|
FileFinderAndSender file_finder(&path_filter_, &message_pump_, &progress_,
|
||||||
|
sources_dir_, options_.recursive,
|
||||||
|
options_.relative);
|
||||||
|
|
||||||
|
progress_.StartFindFiles();
|
||||||
|
for (const std::string& source : sources_) {
|
||||||
|
absl::Status status = file_finder.FindAndSendFiles(source);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
progress_.Finish();
|
||||||
|
|
||||||
|
RETURN_IF_ERROR(file_finder.Flush(), "Failed to flush file finder");
|
||||||
|
file_finder.ReleaseFiles(&files_);
|
||||||
|
|
||||||
|
LOG_INFO("Found and sent %u source files in %0.3f seconds", files_.size(),
|
||||||
|
stopwatch.ElapsedSeconds());
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::ReceiveFileStats() {
|
||||||
|
LOG_INFO("Receiving file stats");
|
||||||
|
|
||||||
|
SendFileStatsResponse response;
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_.ReceiveMessage(PacketType::kSendFileStats, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive SendFileStatsResponse");
|
||||||
|
}
|
||||||
|
|
||||||
|
progress_.ReportFileStats(
|
||||||
|
response.num_missing_files(), response.num_extraneous_files(),
|
||||||
|
response.num_matching_files(), response.num_changed_files(),
|
||||||
|
response.total_missing_bytes(), response.total_changed_client_bytes(),
|
||||||
|
response.total_changed_server_bytes(), response.num_missing_dirs(),
|
||||||
|
response.num_extraneous_dirs(), response.num_matching_dirs(),
|
||||||
|
options_.whole_file, options_.checksum, options_.delete_);
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::ReceiveDeletedFiles() {
|
||||||
|
LOG_INFO("Receiving path of deleted files");
|
||||||
|
std::string current_directory;
|
||||||
|
|
||||||
|
progress_.StartDeleteFiles();
|
||||||
|
for (;;) {
|
||||||
|
AddDeletedFilesResponse response;
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_.ReceiveMessage(PacketType::kAddDeletedFiles, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive AddDeletedFilesResponse");
|
||||||
|
}
|
||||||
|
|
||||||
|
// An empty response indicates that all files have been sent.
|
||||||
|
if (response.files_size() == 0 && response.dirs_size() == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print info. Don't use path::Join(), it would mess up slashes.
|
||||||
|
for (const std::string& file : response.files()) {
|
||||||
|
progress_.ReportFileDeleted(response.directory() + file);
|
||||||
|
}
|
||||||
|
for (const std::string& dir : response.dirs()) {
|
||||||
|
progress_.ReportDirDeleted(response.directory() + dir);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
progress_.Finish();
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::ReceiveFileIndices(
|
||||||
|
const char* file_type, std::vector<uint32_t>* file_indices) {
|
||||||
|
LOG_INFO("Receiving indices of %s files", file_type);
|
||||||
|
|
||||||
|
for (;;) {
|
||||||
|
AddFileIndicesResponse response;
|
||||||
|
absl::Status status =
|
||||||
|
message_pump_.ReceiveMessage(PacketType::kAddFileIndices, &response);
|
||||||
|
if (!status.ok()) {
|
||||||
|
return WrapStatus(status, "Failed to receive AddFileIndicesResponse");
|
||||||
|
}
|
||||||
|
|
||||||
|
// An empty response indicates that all files have been sent.
|
||||||
|
if (response.client_indices_size() == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record file indices.
|
||||||
|
file_indices->insert(file_indices->end(), response.client_indices().begin(),
|
||||||
|
response.client_indices().end());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate indices.
|
||||||
|
for (uint32_t index : *file_indices) {
|
||||||
|
if (index >= files_.size()) {
|
||||||
|
return MakeStatus("Received invalid index %u", index);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("Received %u indices of %s files", file_indices->size(), file_type);
|
||||||
|
|
||||||
|
return absl::OkStatus();
|
||||||
|
}
|
||||||
|
|
||||||
|
absl::Status GgpRsyncClient::SendMissingFiles() {
  if (missing_file_indices_.empty()) {
    return absl::OkStatus();
  }

  LOG_INFO("Sending missing files");

  if (options_.dry_run) {
    for (uint32_t client_index : missing_file_indices_) {
      const ClientFileInfo& file = files_[client_index];
      progress_.StartCopy(file.path.substr(file.base_dir_len), file.size);
      progress_.Finish();
    }
    return absl::OkStatus();
  }

  // This part is (optionally) compressed.
  if (options_.compress) {
    absl::Status status = StartCompressionStream();
    if (!status.ok()) {
      return WrapStatus(status, "Failed to start compression process");
    }
  }

  ParallelFileOpener file_opener(&files_, missing_file_indices_);

  constexpr size_t kBufferSize = 16000;
  for (uint32_t server_index = 0; server_index < missing_file_indices_.size();
       ++server_index) {
    uint32_t client_index = missing_file_indices_[server_index];
    const ClientFileInfo& file = files_[client_index];

    LOG_INFO("%s", file.path);
    progress_.StartCopy(file.path.substr(file.base_dir_len), file.size);
    SendMissingFileDataRequest request;
    request.set_server_index(server_index);
    absl::Status status =
        message_pump_.SendMessage(PacketType::kSendMissingFileData, request);
    if (!status.ok()) {
      return WrapStatus(status, "Failed to send SendMissingFileDataRequest");
    }
    ProgressTracker* progress = &progress_;
    auto handler = [message_pump = &message_pump_, progress](const void* data,
                                                             size_t size) {
      progress->ReportCopyProgress(size);
      return message_pump->SendRawData(data, size);
    };

    FILE* fp = file_opener.GetNextOpenFile();
    if (!fp) {
      return MakeStatus("Failed to open file '%s'", file.path);
    }
    status = path::StreamReadFileContents(fp, kBufferSize, handler);
    fclose(fp);
    if (!status.ok()) {
      return WrapStatus(status, "Failed to read file %s", file.path);
    }

    progress_.Finish();
  }

  if (options_.compress) {
    absl::Status status = StopCompressionStream();
    if (!status.ok()) {
      return WrapStatus(status, "Failed to stop compression process");
    }
  }

  return absl::OkStatus();
}

absl::Status GgpRsyncClient::ReceiveSignaturesAndSendDelta() {
  if (changed_file_indices_.empty()) {
    return absl::OkStatus();
  }

  if (options_.dry_run) {
    for (uint32_t client_index : changed_file_indices_) {
      const ClientFileInfo& file = files_[client_index];
      progress_.StartSync(file.path.substr(file.base_dir_len), file.size,
                          file.size);
      progress_.ReportSyncProgress(file.size, file.size);
      progress_.Finish();
    }
    return absl::OkStatus();
  }

  LOG_INFO("Receiving signatures and sending deltas of changed files");

  // This part is (optionally) compressed.
  if (options_.compress) {
    absl::Status status = StartCompressionStream();
    if (!status.ok()) {
      return WrapStatus(status, "Failed to start compression process");
    }
  }

  CdcInterface cdc(&message_pump_);

  // Open files in parallel. Speeds up many small file case.
  ParallelFileOpener file_opener(&files_, changed_file_indices_);

  std::string signature_data;
  for (uint32_t server_index = 0; server_index < changed_file_indices_.size();
       ++server_index) {
    uint32_t client_index = changed_file_indices_[server_index];
    const ClientFileInfo& file = files_[client_index];

    SendSignatureResponse response;
    absl::Status status =
        message_pump_.ReceiveMessage(PacketType::kAddSignatures, &response);
    if (!status.ok()) {
      return WrapStatus(status, "Failed to receive SendSignatureResponse");
    }

    // Validate index.
    if (response.client_index() != client_index) {
      return MakeStatus("Received invalid index %u. Expected %u.",
                        response.client_index(), client_index);
    }

    LOG_INFO("%s", file.path);
    progress_.StartSync(file.path.substr(file.base_dir_len), file.size,
                        response.server_file_size());

    FILE* fp = file_opener.GetNextOpenFile();
    if (!fp) {
      return MakeStatus("Failed to open file '%s'", file.path);
    }

    status = cdc.ReceiveSignatureAndCreateAndSendDiff(fp, &progress_);
    fclose(fp);
    if (!status.ok()) {
      return WrapStatus(status, "Failed to sync file %s", file.path);
    }

    progress_.Finish();
  }

  if (options_.compress) {
    absl::Status status = StopCompressionStream();
    if (!status.ok()) {
      return WrapStatus(status, "Failed to stop compression process");
    }
  }

  return absl::OkStatus();
}

absl::Status GgpRsyncClient::StartCompressionStream() {
  assert(!compression_stream_);

  // Notify server that data is compressed from now on.
  ToggleCompressionRequest request;
  absl::Status status =
      message_pump_.SendMessage(PacketType::kToggleCompression, request);
  if (!status.ok()) {
    return WrapStatus(status, "Failed to send ToggleCompressionRequest");
  }

  // Make sure the sender thread is idle.
  message_pump_.FlushOutgoingQueue();

  // Set up compression stream.
  uint32_t num_threads = std::thread::hardware_concurrency();
  compression_stream_ = std::make_unique<ZstdStream>(
      &socket_, options_.compress_level, num_threads);

  // Redirect the |message_pump_| output to the compression stream.
  message_pump_.RedirectOutput([this](const void* data, size_t size) {
    LOG_VERBOSE("Compressing packet of size %u", size);
    return compression_stream_->Write(data, size);
  });

  // The pipes are now set up like this:
  // |message_pump_| -> |compression_stream_| -> |socket_|.

  return absl::OkStatus();
}

absl::Status GgpRsyncClient::StopCompressionStream() {
  assert(compression_stream_);

  // Finish writing to |compression_stream_| and change back to writing to the
  // actual network socket.
  message_pump_.FlushOutgoingQueue();
  message_pump_.RedirectOutput(nullptr);

  // Flush compression stream and reset.
  RETURN_IF_ERROR(compression_stream_->Flush(),
                  "Failed to flush compression stream");
  compression_stream_.reset();

  // Wait for the server ack. This must be done before sending more data.
  ToggleCompressionResponse response;
  absl::Status status =
      message_pump_.ReceiveMessage(PacketType::kToggleCompression, &response);
  if (!status.ok()) {
    return WrapStatus(status, "Failed to receive ToggleCompressionResponse");
  }

  return absl::OkStatus();
}

}  // namespace cdc_ft
132
cdc_rsync/cdc_rsync_client.h
Normal file
@@ -0,0 +1,132 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_RSYNC_CDC_RSYNC_CLIENT_H_
#define CDC_RSYNC_CDC_RSYNC_CLIENT_H_

#include <string>
#include <vector>

#include "absl/status/status.h"
#include "cdc_rsync/base/message_pump.h"
#include "cdc_rsync/cdc_rsync.h"
#include "cdc_rsync/client_socket.h"
#include "cdc_rsync/progress_tracker.h"
#include "common/path_filter.h"
#include "common/port_manager.h"
#include "common/remote_util.h"

namespace cdc_ft {

class Process;
class ZstdStream;

class GgpRsyncClient {
 public:
  GgpRsyncClient(const Options& options, PathFilter filter,
                 std::string sources_dir, std::vector<std::string> sources,
                 std::string destination);

  ~GgpRsyncClient();

  // Deploys the server if necessary, starts it and runs the rsync procedure.
  absl::Status Run();

 private:
  // Starts the server process. If the method returns a status with tag
  // |kTagDeployServer|, Run() calls DeployServer() and tries again.
  absl::Status StartServer();

  // Stops the server process.
  absl::Status StopServer();

  // Handler for stdout and stderr data emitted by the server.
  absl::Status HandleServerOutput(const char* data);

  // Runs the rsync procedure.
  absl::Status Sync();

  // Copies all gamelet components to the gamelet.
  absl::Status DeployServer();

  // Sends relevant options to the server.
  absl::Status SendOptions();

  // Finds all source files and sends the file infos to the server.
  absl::Status FindAndSendAllSourceFiles();

  // Receives the stats from the file diffs (e.g. number of missing, changed
  // etc. files) from the server.
  absl::Status ReceiveFileStats();

  // Receives paths of deleted files and prints them out.
  absl::Status ReceiveDeletedFiles();

  // Receives file indices from the server. Used for missing and changed files.
  absl::Status ReceiveFileIndices(const char* file_type,
                                  std::vector<uint32_t>* file_indices);

  // Copies missing files to the server.
  absl::Status SendMissingFiles();

  // Core rsync algorithm. Receives signatures of changed files from server,
  // calculates the diffs and sends them to the server.
  absl::Status ReceiveSignaturesAndSendDelta();

  // Start the zstd compression stream. Used before file copy and diff.
  absl::Status StartCompressionStream();

  // Stops the zstd compression stream.
  absl::Status StopCompressionStream();

  Options options_;
  PathFilter path_filter_;
  const std::string sources_dir_;
  std::vector<std::string> sources_;
  const std::string destination_;

  WinProcessFactory process_factory_;
  RemoteUtil remote_util_;
  PortManager port_manager_;
  ClientSocket socket_;
  MessagePump message_pump_{&socket_, MessagePump::PacketReceivedDelegate()};
  ConsoleProgressPrinter printer_;
  ProgressTracker progress_;
  std::unique_ptr<ZstdStream> compression_stream_;

  std::unique_ptr<Process> server_process_;
  std::string server_output_;  // Written in a background thread. Do not access
  std::string server_error_;   // while the server process is active.
  int server_exit_code_ = 0;
  std::atomic_bool is_server_listening_{false};
  bool is_server_error_ = false;

  // All source files found on the client.
  std::vector<ClientFileInfo> files_;

  // All source dirs found on the client.
  std::vector<ClientDirInfo> dirs_;

  // Indices (into files_) of files that are missing on the server.
  std::vector<uint32_t> missing_file_indices_;

  // Indices (into files_) of files that exist, but are different on the server.
  std::vector<uint32_t> changed_file_indices_;
};

}  // namespace cdc_ft

#endif  // CDC_RSYNC_CDC_RSYNC_CLIENT_H_
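// Illustrative sketch, not one of the files in this commit: how a caller
// might drive GgpRsyncClient. It assumes that Options (from
// cdc_rsync/cdc_rsync.h) is default-constructible and normally filled in from
// command-line flags, and that a default PathFilter lets every file through;
// the source and destination paths are made-up placeholders.
#include <string>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "cdc_rsync/cdc_rsync_client.h"
#include "common/path_filter.h"

absl::Status RunExampleSync() {
  cdc_ft::Options options;    // Real callers parse command-line flags into this.
  cdc_ft::PathFilter filter;  // Assumed: no rules -> every source file is synced.
  std::vector<std::string> sources = {"assets"};
  cdc_ft::GgpRsyncClient client(options, std::move(filter),
                                /*sources_dir=*/"C:\\build",
                                std::move(sources),
                                /*destination=*/"/mnt/developer/assets");
  // Run() starts (and, if necessary, deploys) the server, then performs the
  // sync: send file infos, receive diff stats, copy missing files, and send
  // CDC deltas for changed files.
  return client.Run();
}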
43
cdc_rsync/client_file_info.h
Normal file
@@ -0,0 +1,43 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_RSYNC_CLIENT_FILE_INFO_H_
#define CDC_RSYNC_CLIENT_FILE_INFO_H_

#include <string>

namespace cdc_ft {

struct ClientFileInfo {
  std::string path;
  uint64_t size;
  uint32_t base_dir_len;

  ClientFileInfo(const std::string& path, uint64_t size, uint32_t base_dir_len)
      : path(path), size(size), base_dir_len(base_dir_len) {}
};

struct ClientDirInfo {
  std::string path;
  uint32_t base_dir_len;

  ClientDirInfo(const std::string& path, uint32_t base_dir_len)
      : path(path), base_dir_len(base_dir_len) {}
};

}  // namespace cdc_ft

#endif  // CDC_RSYNC_CLIENT_FILE_INFO_H_
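// Illustrative sketch, not one of the files in this commit: the base_dir_len
// convention as used by the client code above. The first |base_dir_len|
// characters of |path| are the synced base directory, so
// path.substr(base_dir_len) yields the path relative to it (this is what
// cdc_rsync_client.cc passes to the progress tracker). The paths and size
// below are made-up placeholders.
#include <cassert>
#include <cstdint>
#include <string>

#include "cdc_rsync/client_file_info.h"

void ClientFileInfoExample() {
  const std::string base_dir = "C:\\project\\assets\\";
  const std::string path = base_dir + "textures\\wood.png";
  cdc_ft::ClientFileInfo info(path, /*size=*/4096,
                              static_cast<uint32_t>(base_dir.size()));
  // The relative path reported during a copy would be "textures\wood.png".
  assert(info.path.substr(info.base_dir_len) == "textures\\wood.png");
}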
174
cdc_rsync/client_socket.cc
Normal file
@@ -0,0 +1,174 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "cdc_rsync/client_socket.h"

#include <winsock2.h>
#include <ws2tcpip.h>

#include <cassert>

#include "common/log.h"
#include "common/status.h"
#include "common/util.h"

namespace cdc_ft {
namespace {

// Creates a status with the given |message| and the last WSA error.
// Assigns Tag::kSocketEof for WSAECONNRESET errors.
absl::Status MakeSocketStatus(const char* message) {
  const int err = WSAGetLastError();
  absl::Status status = MakeStatus("%s: %s", message, Util::GetWin32Error(err));
  if (err == WSAECONNRESET) {
    status = SetTag(status, Tag::kSocketEof);
  }
  return status;
}

}  // namespace

struct SocketInfo {
  SOCKET socket;

  SocketInfo() : socket(INVALID_SOCKET) {}
};

ClientSocket::ClientSocket() = default;

ClientSocket::~ClientSocket() { Disconnect(); }

absl::Status ClientSocket::Connect(int port) {
  WSADATA wsaData;
  int result = WSAStartup(MAKEWORD(2, 2), &wsaData);
  if (result != 0) {
    return MakeStatus("WSAStartup() failed: %i", result);
  }

  addrinfo hints;
  ZeroMemory(&hints, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_protocol = IPPROTO_TCP;

  // Resolve the server address and port.
  addrinfo* addr_infos = nullptr;
  result = getaddrinfo("localhost", std::to_string(port).c_str(), &hints,
                       &addr_infos);
  if (result != 0) {
    WSACleanup();
    return MakeStatus("getaddrinfo() failed: %i", result);
  }

  socket_info_ = std::make_unique<SocketInfo>();
  int count = 0;
  for (addrinfo* curr = addr_infos; curr; curr = curr->ai_next, count++) {
    socket_info_->socket =
        socket(addr_infos->ai_family, addr_infos->ai_socktype,
               addr_infos->ai_protocol);
    if (socket_info_->socket == INVALID_SOCKET) {
      LOG_DEBUG("socket() failed for addr_info %i: %s", count,
                Util::GetWin32Error(WSAGetLastError()).c_str());
      continue;
    }

    // Connect to server.
    result = connect(socket_info_->socket, curr->ai_addr,
                     static_cast<int>(curr->ai_addrlen));
    if (result == SOCKET_ERROR) {
      LOG_DEBUG("connect() failed for addr_info %i: %i", count, result);
      closesocket(socket_info_->socket);
      socket_info_->socket = INVALID_SOCKET;
      continue;
    }

    // Success!
    break;
  }

  freeaddrinfo(addr_infos);

  if (socket_info_->socket == INVALID_SOCKET) {
    socket_info_.reset();
    WSACleanup();
    return MakeStatus("Unable to connect to port %i", port);
  }

  LOG_INFO("Client socket connected to port %i", port);
  return absl::OkStatus();
}

void ClientSocket::Disconnect() {
  if (!socket_info_) {
    return;
  }

  if (socket_info_->socket != INVALID_SOCKET) {
    closesocket(socket_info_->socket);
    socket_info_->socket = INVALID_SOCKET;
  }

  socket_info_.reset();
  WSACleanup();
}

absl::Status ClientSocket::Send(const void* buffer, size_t size) {
  int result = send(socket_info_->socket, static_cast<const char*>(buffer),
                    static_cast<int>(size), /*flags */ 0);
  if (result == SOCKET_ERROR) {
    return MakeSocketStatus("send() failed");
  }

  return absl::OkStatus();
}

absl::Status ClientSocket::Receive(void* buffer, size_t size,
                                   bool allow_partial_read,
                                   size_t* bytes_received) {
  *bytes_received = 0;
  if (size == 0) {
    return absl::OkStatus();
  }

  int flags = allow_partial_read ? 0 : MSG_WAITALL;
  int bytes_read = recv(socket_info_->socket, static_cast<char*>(buffer),
                        static_cast<int>(size), flags);
  if (bytes_read == SOCKET_ERROR) {
    return MakeSocketStatus("recv() failed");
  }

  if (bytes_read == 0) {
    // EOF
    return SetTag(MakeStatus("EOF detected"), Tag::kSocketEof);
  }

  if (bytes_read != size && !allow_partial_read) {
    // Can this happen?
    return MakeStatus("Partial read");
  }

  *bytes_received = bytes_read;
  return absl::OkStatus();
}

absl::Status ClientSocket::ShutdownSendingEnd() {
  int result = shutdown(socket_info_->socket, SD_SEND);
  if (result == SOCKET_ERROR) {
    return MakeSocketStatus("shutdown() failed");
  }

  return absl::OkStatus();
}

}  // namespace cdc_ft
53
cdc_rsync/client_socket.h
Normal file
@@ -0,0 +1,53 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_RSYNC_CLIENT_SOCKET_H_
#define CDC_RSYNC_CLIENT_SOCKET_H_

#include <memory>

#include "absl/status/status.h"
#include "cdc_rsync/base/socket.h"

namespace cdc_ft {

class ClientSocket : public Socket {
 public:
  ClientSocket();
  ~ClientSocket();

  // Connects to localhost on |port|.
  absl::Status Connect(int port);

  // Disconnects again. No-op if not connected.
  void Disconnect();

  // Shuts down the sending end of the socket. This will interrupt any receive
  // calls on the server and shut it down.
  absl::Status ShutdownSendingEnd();

  // Socket:
  absl::Status Send(const void* buffer, size_t size) override;
  absl::Status Receive(void* buffer, size_t size, bool allow_partial_read,
                       size_t* bytes_received) override;

 private:
  std::unique_ptr<struct SocketInfo> socket_info_;
};

}  // namespace cdc_ft

#endif  // CDC_RSYNC_CLIENT_SOCKET_H_
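// Illustrative sketch, not one of the files in this commit: minimal use of
// ClientSocket, assuming a cdc_rsync server is already listening on a locally
// forwarded port. The port number and payload are placeholders; the real
// client exchanges protobuf messages through MessagePump instead of raw text.
#include <cstddef>

#include "absl/status/status.h"
#include "cdc_rsync/client_socket.h"

absl::Status ExampleRoundTrip() {
  cdc_ft::ClientSocket socket;
  absl::Status status = socket.Connect(44450);  // Connects to localhost:44450.
  if (!status.ok()) return status;

  const char request[] = "ping";
  status = socket.Send(request, sizeof(request));
  if (!status.ok()) {
    socket.Disconnect();
    return status;
  }

  char reply[64] = {0};
  size_t bytes_received = 0;
  // allow_partial_read = true returns as soon as any data is available.
  status = socket.Receive(reply, sizeof(reply), /*allow_partial_read=*/true,
                          &bytes_received);
  socket.Disconnect();
  return status;
}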
2
cdc_rsync/cpp.hint
Normal file
@@ -0,0 +1,2 @@
#define CDC_RSYNC_API __declspec(dllexport)
#define CDC_RSYNC_API __declspec(dllimport)
29
cdc_rsync/dllmain.cc
Normal file
@@ -0,0 +1,29 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

BOOL APIENTRY DllMain(HMODULE /* hModule */, DWORD ul_reason_for_call,
                      LPVOID /* lpReserved */
) {
  switch (ul_reason_for_call) {
    case DLL_PROCESS_ATTACH:
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
    case DLL_PROCESS_DETACH:
      break;
  }
  return TRUE;
}
54
cdc_rsync/error_messages.h
Normal file
@@ -0,0 +1,54 @@
/*
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CDC_RSYNC_ERROR_MESSAGES_H_
#define CDC_RSYNC_ERROR_MESSAGES_H_

namespace cdc_ft {

// Server connection timed out. SSH probably stale.
constexpr char kMsgFmtConnectionTimeout[] =
    "Server connection timed out. Please re-run 'ggp ssh init' and verify that "
    "the IP '%s' and the port '%i' are correct.";

// Server connection timed out and IP was not passed in. Probably network error.
constexpr char kMsgConnectionTimeoutWithIp[] =
    "Server connection timed out. Please check your network connection.";

// Receiving pipe end was shut down unexpectedly.
constexpr char kMsgConnectionLost[] =
    "The connection to the instance was shut down unexpectedly.";

// Binding to the port failed.
constexpr char kMsgAddressInUse[] =
    "Failed to establish a connection to the instance. All ports are already "
    "in use. This can happen if another instance of this command is running. "
    "Currently, only 10 simultaneous connections are supported.";

// Deployment failed even though gamelet components were copied successfully.
constexpr char kMsgDeployFailed[] =
    "Failed to deploy the instance components for unknown reasons. "
    "Please report this issue.";

// Picking an instance is not allowed in quiet mode.
constexpr char kMsgInstancePickerNotAvailableInQuietMode[] =
    "Multiple gamelet instances are reserved, but the instance picker is not "
    "available in quiet mode. Please specify --instance or remove -q resp. "
    "--quiet.";

}  // namespace cdc_ft

#endif  // CDC_RSYNC_ERROR_MESSAGES_H_
Some files were not shown because too many files have changed in this diff.