From 40ee842c5d279d5736574d6c92dffcda7763f3fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Wed, 16 Feb 2022 19:18:22 +0000 Subject: [PATCH 1/7] decomposedfs: shard nodes per space MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Squashed commit of the following: commit 4a5e9aad8632aefbdd47229f873272af4dab320d Merge: e60fb6b4 a6eec6a6 Author: Jörn Friedrich Dreyer Date: Wed Feb 16 19:11:25 2022 +0000 Merge branch 'edge' into nodes-per-space commit e60fb6b4cc8f3f89d9f802cf98889b981557a5dd Author: Jörn Friedrich Dreyer Date: Wed Feb 16 15:48:53 2022 +0000 fix restore Signed-off-by: Jörn Friedrich Dreyer commit 15db9adf3bedac9599490a73b6353d7507486483 Author: Jörn Friedrich Dreyer Date: Wed Feb 16 15:29:33 2022 +0000 fix list trash Signed-off-by: Jörn Friedrich Dreyer commit e4c1191f5d0571a00cd897e8e54fa8801752cc39 Author: Jörn Friedrich Dreyer Date: Wed Feb 16 13:55:24 2022 +0000 introduce new layout Signed-off-by: Jörn Friedrich Dreyer commit dfd5eb38289b8e39827dbbce5d62f192971bf07b Merge: 2698dc12 df1264de Author: Jörn Friedrich Dreyer Date: Tue Feb 15 13:15:19 2022 +0000 Merge branch 'edge' into nodes-per-space commit 2698dc1296f7cd4fa79d9f11795894e67d97e206 Author: Jörn Friedrich Dreyer Date: Tue Feb 15 13:13:33 2022 +0000 start using correct node path Signed-off-by: Jörn Friedrich Dreyer commit 2401678a557c838dc62a32330886d4207722e99a Merge: 02da0465 d217886c Author: Jörn Friedrich Dreyer Date: Tue Feb 15 10:40:30 2022 +0000 Merge branch 'edge' into nodes-per-space commit 02da0465063bb33544131134681b88f93077c0a7 Author: Jörn Friedrich Dreyer Date: Tue Feb 15 08:22:41 2022 +0000 small fixes Signed-off-by: Jörn Friedrich Dreyer commit 752c6926ff65206bf9713e85ee1c77c59c5f73d6 Merge: cbc1bed6 c7e66072 Author: Jörn Friedrich Dreyer Date: Mon Feb 14 10:11:35 2022 +0000 Merge branch 'edge' into nodes-per-space commit cbc1bed67cd3b0af83439869f28bb26757727a58 Author: Jörn Friedrich Dreyer Date: Mon Feb 14 10:03:41 2022 +0000 remove duplicate const Signed-off-by: Jörn Friedrich Dreyer commit 5a666db332228371ac93243d39297c2804e4f479 Author: David Christofas Date: Wed Jan 12 17:15:05 2022 +0100 store nodes of a space inside of the space This is still work in progress. In this state the node_test.go run successfully. commit 20a38e4aebd613561ecff33faec87b38883d8610 Author: kobergj Date: Fri Feb 11 10:31:52 2022 +0100 Events (#2522) * first draft for event system - includes example Signed-off-by: jkoberg * add event middleware Signed-off-by: jkoberg * events: distinguish grantee userid and groupid Signed-off-by: Jörn Friedrich Dreyer * seperate consumer from publisher Signed-off-by: jkoberg * code review suggestions Signed-off-by: jkoberg * simplify example Signed-off-by: jkoberg * add changelog Signed-off-by: jkoberg * make nats server configurable Signed-off-by: jkoberg * add license headers Signed-off-by: jkoberg * cheat the linter Signed-off-by: jkoberg Co-authored-by: Jörn Friedrich Dreyer commit 65bb12a803ce837cd7d188b0f3b2a84f95893a21 Author: Klaas Freitag Date: Thu Feb 10 15:04:17 2022 +0100 Consolidate all metadata Get's and Set's to central functions. (#2512) * Consolidate all metadata Get's and Set's to central functions. In the decomposed FS, access to xattr was spread all over. This patch consolidates that to use either the Node.SetMetadata() or xattrs.Set(). This allows us to hook in indexing for example. * hound and typos * Add changelog. * Some more fixes to use xattrs functions. * Fix function name in tests. 
* Fix some linting hints. * Even more linter warning fixes * Even more linting issues * And another iteration * Linter I hate you * Use proper Int formatting Co-authored-by: David Christofas * Update pkg/storage/utils/decomposedfs/node/node.go Co-authored-by: David Christofas * Update pkg/storage/utils/decomposedfs/node/node.go Co-authored-by: David Christofas * Update pkg/storage/utils/decomposedfs/xattrs/xattrs.go Co-authored-by: David Christofas * again linting * use correct variable in decomposedfs Co-authored-by: Jörn Friedrich Dreyer Co-authored-by: David Christofas commit f283b98d127d543132345e45350d07341517ca32 Author: Andre Duffeck Date: Thu Feb 10 14:48:17 2022 +0100 Upgrade to ginkgo v2, increase test coverage (#2526) * Upgrade to ginkgo v2 * Fix root info being counted twice * Do not try to list child non-containers * Improve generating response xml * DRY up code * Increase test coverage, port EncodePath benchmark to ginkgo Also add an assertion that its performanc does not decrease too much. * Add changelog * Fix hound issue * Add missing license header * Fix linter issue commit c2c5a452349591bd9092dad23a66b5bc9c7521cd Author: Klaas Freitag Date: Thu Feb 10 13:56:22 2022 +0100 Some error cleanup steps in the decomposed FS (#2511) * Some error cleanup steps in the decomposed FS * Hound and changelog added. * Remove punctuation * Make CI happy * Improved error logging. Co-authored-by: David Christofas * Update pkg/storage/utils/decomposedfs/decomposedfs.go Co-authored-by: David Christofas Co-authored-by: David Christofas Co-authored-by: Jörn Friedrich Dreyer commit b522aab1054357d77eedb73f1aeee57c8246cba3 Author: Jörn Friedrich Dreyer Date: Wed Feb 9 11:22:41 2022 +0100 decomposedfs: add locking support (#2460) * decomposedfs: add locking support Signed-off-by: Jörn Friedrich Dreyer * add lock implementation, refactor error handling Signed-off-by: Jörn Friedrich Dreyer * introduce lock ctx Signed-off-by: Jörn Friedrich Dreyer * add locked error and status code mapping Signed-off-by: Jörn Friedrich Dreyer * read lockid from opaque into ctx for delete Signed-off-by: Jörn Friedrich Dreyer * decomposedfs: make delete respect lock Signed-off-by: Jörn Friedrich Dreyer * ocdav: simplify error code mapping Signed-off-by: Jörn Friedrich Dreyer * adjust to cs3 lock api update Signed-off-by: Jörn Friedrich Dreyer * utils: add and read plain opaque entries Signed-off-by: Jörn Friedrich Dreyer * fix delete lock Signed-off-by: Jörn Friedrich Dreyer * invalidate stat cache when setting lock Signed-off-by: Jörn Friedrich Dreyer * fix a few linter items Signed-off-by: Jörn Friedrich Dreyer * linter happyness Signed-off-by: Jörn Friedrich Dreyer * ocdav: implment unlock Signed-off-by: Jörn Friedrich Dreyer * lock caching and unlocking Signed-off-by: Jörn Friedrich Dreyer * read locks on folders Signed-off-by: Jörn Friedrich Dreyer * check lock on writes Signed-off-by: Jörn Friedrich Dreyer * always assume locktype write Signed-off-by: Jörn Friedrich Dreyer * ocis only supports exclusive locks Signed-off-by: Jörn Friedrich Dreyer * add precodition failed errtype * handle preconditionfailed in status conversion * omit empty xml tags in lockdiscovery * handle locked status on LOCK * document oc10 lock behaviour as comment * ocdav: handle errors for LOCK and UNLOCK * storage: change fs.Unlock signature * decomposedfs: refactor checkLock * add LookupReferenceForPath comment Signed-off-by: Jörn Friedrich Dreyer * gateway: ignore unimplemented add/denyGrant response Signed-off-by: Jörn Friedrich Dreyer 
* use http.StatusMethodNotAllowed for mkcol error case Signed-off-by: Jörn Friedrich Dreyer * make hound happy Signed-off-by: Jörn Friedrich Dreyer * ocdav: be more tolerant with the Lock-Token header Signed-off-by: Jörn Friedrich Dreyer * ocdav: allow setting infinity timeout Signed-off-by: Jörn Friedrich Dreyer * ocdav: use custem owner innerxml without href * update expected failures * ocdav: return conflict on missing intermediate target dir Signed-off-by: Jörn Friedrich Dreyer * decomposedfs: use checkLock() in the rest of cases Signed-off-by: Jörn Friedrich Dreyer * Do not choke when checking the lock of non-existent nodes * Linter fixes * Refactor: Move lock handling into the node domain * Also delete the lock file when deleting a node * Extract node lock-handling code into separate file, start writing tests * Add missing license header * Fix relocking already-locked nodes. Increase test coverage * Hounds be happy * Add unit tests for ReadLock and RefreshLock * Also cover readLocksIntoOpaque in the tests * Fix linter issue * Do not log full nodes, it's very expensive and not very helpful * Start adding grpc integration tests for locking * Fix setting the lock for file uploads * Allow for locking space-based resources * Make sure to log the error before it's getting overwritten * reuse xml.EscapeText directly Co-authored-by: David Christofas * decomposedfs: use defer to close file when unlocking resource Signed-off-by: Jörn Friedrich Dreyer * ocdav: explain why some http states are commented Signed-off-by: Jörn Friedrich Dreyer * ocdav: use http header status constants Signed-off-by: Jörn Friedrich Dreyer * clarify add/deny grant log Signed-off-by: Jörn Friedrich Dreyer * update expected failures Signed-off-by: Jörn Friedrich Dreyer Co-authored-by: André Duffeck Co-authored-by: David Christofas commit 4c185f9d354d1e7cf13d1d341fc734cffe5d663b Author: Michael Barz Date: Tue Feb 8 14:17:16 2022 +0100 remove creation of .space folder (#2519) * remove creation of .space folder * fix unit tests * fix integration tests commit 68080f0bcf873d01f8efd825ec4f76400aad6838 Author: PKiran <39373750+kiranparajuli589@users.noreply.github.com> Date: Tue Feb 8 17:58:00 2022 +0545 bump core commit id and update the expected failures (#2518) commit be39db39c6acd3f168652b7d410c5be251989ad4 Author: David Christofas Date: Tue Feb 8 10:16:30 2022 +0100 Cleanup code (#2516) * pre-compile the chunking regex * reduce type conversions * add changelog commit 57591c1655468809233a272b6e4ff6655a06a524 Author: Phil Davis Date: Mon Feb 7 12:16:34 2022 +0545 bump CORE_COMMITID to use new getPersonalSpaceIdForUser code in API tests (#2505) commit 064ec8279012878cce958a225ee98a7d853a23d4 Author: David Christofas Date: Fri Feb 4 18:29:01 2022 +0100 fix propfind listing for files (#2506) commit 1aadbc8eb6787ec1b9b194773a5859d4db23feb2 Author: kobergj Date: Fri Feb 4 12:55:25 2022 +0100 [tests-only] Fix panic in storageSpaceFromNode (#2504) * fix panic in storageSpaceFromNode Signed-off-by: jkoberg * check node for being nil Signed-off-by: jkoberg * Revert "check node for being nil" This reverts commit e38228e1d91c7a21c8c1d010020b66740d1ae354. 
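Note on the locking work above (#2460): it boils down to a simple protocol — each node gets a companion lock file, the file is created with O_EXCL so taking an already-held lock fails, and every write first compares the lock id sent with the request against the persisted one (ocis only supports exclusive locks). A simplified, self-contained sketch of that flow; the JSON lock file and its field names are illustrative stand-ins, not the actual CS3 lock type used by reva:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
)

// lock is a simplified stand-in for the persisted lock metadata.
type lock struct {
	LockID string `json:"lock_id"`
	User   string `json:"user"`
}

// setLock creates the per-node lock file exclusively, mirroring the
// "O_EXCL to make open fail when the file already exists" approach.
func setLock(path string, l lock) error {
	f, err := os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return fmt.Errorf("node is already locked: %w", err)
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(l)
}

// checkLock is the write-path check: a write may only proceed when the node
// is unlocked or the request carries the matching lock id.
func checkLock(path, requestLockID string) error {
	f, err := os.Open(path)
	if errors.Is(err, os.ErrNotExist) {
		return nil // not locked
	}
	if err != nil {
		return err
	}
	defer f.Close()
	var l lock
	if err := json.NewDecoder(f).Decode(&l); err != nil {
		return err
	}
	if l.LockID != requestLockID {
		return errors.New("precondition failed: wrong or missing lock id")
	}
	return nil
}

func main() {
	path := "example.lock"
	defer os.Remove(path)
	_ = setLock(path, lock{LockID: "lock-123", User: "einstein"})
	fmt.Println(checkLock(path, ""))         // precondition failed: wrong or missing lock id
	fmt.Println(checkLock(path, "lock-123")) // <nil>
}
```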
commit 32384aa4d8a04112e3f2aed63d2c51ea073bda50 Author: Willy Kloucek <34452982+wkloucek@users.noreply.github.com> Date: Fri Feb 4 12:48:23 2022 +0100 unprotected ocs config endpoint (#2503) * remove protection from ocs config endpoint * remove passing ocs config test from expected failures commit 27b309125cf832aa5364d6ebfc6c11a37b9c3e7b Author: kobergj Date: Fri Feb 4 10:49:29 2022 +0100 Restoring Spaces (#2458) * add restore functionality to decomposedfs Signed-off-by: jkoberg * add changelog item Signed-off-by: jkoberg * find trashed spaces Signed-off-by: jkoberg * add includeTrashed paramter to ListSpaces call Signed-off-by: jkoberg * only add * if neccessary Signed-off-by: jkoberg * move instead delete to make restore possible Signed-off-by: jkoberg * pass trashed information via Opaque Signed-off-by: jkoberg * don't return trashed space for non owners Signed-off-by: jkoberg * revert includeTrashed hack Signed-off-by: jkoberg * no need to symlink trashed spaces any more Signed-off-by: jkoberg * bad solution - just wanna paint it green Signed-off-by: jkoberg * allow listing of deleted spaces Signed-off-by: jkoberg commit 2754a380d3267a7c9b29401f06819497f29d71c7 Author: Phil Davis Date: Fri Feb 4 14:13:58 2022 +0545 Bump CORE_COMMITID for API tests (#2496) commit a8bfe6f5d12e1cc6848f892870775e380621af89 Author: David Christofas Date: Thu Feb 3 17:54:58 2022 +0100 add grants to list-spaces (#2498) When listing storage spaces we also want to have the spaces grants to be able to show who has access to the space. Co-authored-by: kobergj commit 05aca63bc2eca56ee4ab9a628d0eb9ae6fc45a05 Author: Michael Barz Date: Thu Feb 3 16:52:34 2022 +0100 invalidate cache when modifying or deleting a space (#2500) commit 99fdf2dc307834efc884cd1e0c30b8d947e630e6 Author: David Christofas Date: Thu Feb 3 16:21:45 2022 +0100 fix spaces stat requests (#2501) commit 0443b41fa571b44aa615aa24c4638ebe957d2b63 Author: Willy Kloucek <34452982+wkloucek@users.noreply.github.com> Date: Tue Feb 1 21:11:17 2022 +0100 [edge] remove the ownCloud storage driver (#2495) * remove owncloud storage driver * remove owncloud storage driver integration tests and demo config files commit affffea4343f04c11c69a903fbb0b96a8b959dff Author: kobergj Date: Tue Feb 1 11:04:33 2022 +0100 [tests-only] Can't create folder names which are subset of "Shares" (#2484) * test fix with easy implementation Signed-off-by: jkoberg * use same logic for other above path also Signed-off-by: jkoberg * add special case handling to isSubPath Signed-off-by: jkoberg * refine isSubpath logic Signed-off-by: jkoberg * Stat on MKCOL before creating Signed-off-by: jkoberg * update comment to not include typos Signed-off-by: jkoberg * handle error correctly for MKCOL Signed-off-by: jkoberg commit a230234dbd3332ac236c8167969a2295629a1937 Author: Amrita <54478846+amrita-shrestha@users.noreply.github.com> Date: Fri Jan 28 20:35:07 2022 +0545 Bump the commit id for tests (#2490) commit 749849177f54219bdf1fd32bbbacb3067ee45f03 Author: Andre Duffeck Date: Fri Jan 28 15:32:48 2022 +0100 Make owncloudsql spaces aware (#2472) * Add ListStorages method * Implement ListStorageSpaces in owncloudsql * ResourceInfos do no longer contain the full path but only the basename * Handle references relative to a root * Fix space lookup, extract space functionality into a separate file * Use oc_mounts to find the storage roots This way it also works with storages with hashed IDs (that happens when the id exceeds a certain length). 
* Fix shares * Implement GetPath, fix GetPathById * Include the storage id when listing shares * Fix accepting declined shares * Ignore setting-grants-not-supported errors, storage grants are optional * Add changelog * Fix missing storage id from resource info * Fix field mask * Do not log error messages for unsupported grant calls * Fix hound issue * Fix changelog URL * Fix linter issue * Remove unfinished GetPath() code * Adapt expected failures * Cache user lookups in the oc10-sql share manager That leads to a massive performance boost. commit 5d83deddef3e3626e65e3e6cadff13d97d8ab686 Author: Jörn Friedrich Dreyer Date: Fri Jan 28 09:20:13 2022 +0100 update cs3apis to include lock api changes (#2487) Signed-off-by: Jörn Friedrich Dreyer commit 579aab4e3d02961075019942c1c28abe0fc01581 Author: Phil Davis Date: Fri Jan 28 00:26:51 2022 +0545 Add end-of-line to expected-failures files (#2483) commit 2e2a3f0c04286f031fef5adff34013588c00894d Author: David Christofas Date: Thu Jan 27 15:46:06 2022 +0100 [tests-only] Merge master into edge (#2473) * [Build-deps] Additional rules for CODEOWNERS (#2323) * Remove share refs from trashbin (#2298) * Public link propfind (#2315) * fix public share type in propfinds (#2316) * Bump core commit id for tests (#2331) * Revert "Fix content disposition (#2303)" (#2332) This reverts commit 3cba22371b78213f2e49197c2783220331a264bd. * [Build-deps]: Bump github.com/gomodule/redigo from 1.8.5 to 1.8.6 (#2326) * [Build-deps]: Bump github.com/mitchellh/mapstructure from 1.4.2 to 1.4.3 (#2324) * [Build-deps]: Bump github.com/aws/aws-sdk-go from 1.42.9 to 1.42.19 (#2325) * fix app provider new file action and improve app provider error codes (#2210) * Parse URL path to determine file name (#2346) * v1.17.0 * handle non existent spaces gracefully (#2354) * Bump core commit id for tests (#2365) * [Build-deps]: Bump github.com/minio/minio-go/v7 from 7.0.16 to 7.0.18 (#2363) * [Build-deps]: Bump github.com/ReneKroon/ttlcache/v2 from 2.9.0 to 2.10.0 (#2358) * [Build-deps]: Bump go.opentelemetry.io/otel/exporters/jaeger (#2362) * fix tests by pointing to the right owncloud/core commit id for tests (#2375) * add new file capabilties to ocs for the app provider (#2379) * Remove test from expected to fail and bump commit id (#2380) * add .drone.env to CODEOWNERS as it is part of the test files (#2378) * fix webdav copy for zero byte files (#2374) * Implement touch file (#2369) * implement cs3org/cs3apis#154 * use TouchFile for the app provider * add changelog and comments * revert use TouchFile in app provider * fix resource typo Co-authored-by: Giuseppe Lo Presti Co-authored-by: Giuseppe Lo Presti * Dummy implementation of the Lock CS3APIs (#2350) * allow new file create with app provider on public links (#2385) * Bump core commit id and use core master for tests (#2391) * Add product to ocs Version struct (#2397) The web ui will announce the backend version in the javascript console and is supposed to include the product name as well. The version seems to be a good location for the product field as it already includes the software edition as well. 
* bump core commit id for tests (#2404) * [Build-deps]: Bump github.com/mattn/go-sqlite3 from 1.14.9 to 1.14.10 (#2409) * [Build-deps]: Bump github.com/minio/minio-go/v7 from 7.0.18 to 7.0.20 (#2408) * [Build-deps]: Bump github.com/rs/cors from 1.8.0 to 1.8.2 (#2399) * [Build-deps]: Bump github.com/ReneKroon/ttlcache/v2 (#2387) * [Build-deps]: Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc (#2359) * [tests-only] format .drone.star (#2411) * update tus/tusd to version 1.8.0 (#2393) * Fixes for apps in public shares, project spaces for EOS driver (#2371) * [Build-deps]: Bump github.com/aws/aws-sdk-go from 1.42.19 to 1.42.27 (#2414) * [Build-deps]: Bump github.com/rs/zerolog from 1.26.0 to 1.26.1 (#2388) * update owncloud core commit id (#2418) * [Build-deps]: Bump github.com/mattn/go-sqlite3 (#2425) * [Build-deps]: Bump github.com/gomodule/redigo from 1.8.6 to 1.8.8 (#2426) * OIDC and WOPI changes for lightweight users (#2278) * [tests-only]Bump the commit id for tests (#2441) * Bump the commit id for tests * Adding failing tests to expected to failure * CephFS Reva v0.2 (#1209) * [Build-deps]: Bump github.com/minio/minio-go/v7 from 7.0.20 to 7.0.21 (#2449) * [Build-deps]: Bump github.com/hashicorp/go-hclog from 1.0.0 to 1.1.0 (#2448) * [Build-deps]: Bump github.com/BurntSushi/toml from 0.4.1 to 1.0.0 (#2446) * revert go-sqlite downgrade (#2461) * [Build-deps]: Bump github.com/google/go-cmp from 0.5.6 to 0.5.7 (#2466) * [Build-deps]: Bump github.com/aws/aws-sdk-go from 1.42.27 to 1.42.39 (#2467) * [Build-deps]: Bump github.com/ceph/go-ceph from 0.12.0 to 0.13.0 (#2468) * [Build-deps]: Bump github.com/onsi/gomega from 1.17.0 to 1.18.0 (#2469) * Use permissions API in decomposedfs (#2341) * [tests-only]Bump Core Commit Id (#2451) * Bump commit id for tests 2022-01-25 (#2474) * Bump commit id for issue-ocis-3030 (#2476) * fix test failures * fix integration tests Co-authored-by: Giuseppe Lo Presti Co-authored-by: Gianmaria Del Monte <39946305+gmgigi96@users.noreply.github.com> Co-authored-by: Swikriti Tripathi <41103328+SwikritiT@users.noreply.github.com> Co-authored-by: Ishank Arora Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Willy Kloucek <34452982+wkloucek@users.noreply.github.com> Co-authored-by: Michael Barz Co-authored-by: Phil Davis Co-authored-by: Benedikt Kulmann Co-authored-by: Saw-jan Gurung Co-authored-by: PKiran <39373750+kiranparajuli589@users.noreply.github.com> Co-authored-by: Mouratidis Theofilos Co-authored-by: Amrita <54478846+amrita-shrestha@users.noreply.github.com> commit 2329e29240769bc103be3fea66c8a1bd2fde7cd7 Author: Amrita <54478846+amrita-shrestha@users.noreply.github.com> Date: Wed Jan 26 15:54:59 2022 +0545 Bump commit id for ocis issue 3030 (#2477) commit 14884122ece21ab516dbecfd097c0ae96dda9236 Author: Phil Davis Date: Wed Jan 26 10:26:56 2022 +0545 Bump commit id for tests 2022-01-25 (edge) (#2475) commit 2a46af98d56c96d578c22009659c07d056cfe9c3 Author: Amrita <54478846+amrita-shrestha@users.noreply.github.com> Date: Mon Jan 24 20:56:46 2022 +0545 [tests-only]Bump the commit id for tests (#2453) * Bump the commit id for tests * skip personalSpace tests for now * skip issue-ocis-3023 tests to avoid infinite loop Co-authored-by: Phil Davis commit bc20acb6d9f59f26223a595ec8c4422224529032 Author: kobergj Date: Fri Jan 21 14:48:12 2022 +0100 Space grants (#2464) * send spacegrants and pass them to decomposedfs Signed-off-by: jkoberg * add changelog item Signed-off-by: jkoberg 
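The "Space grants" change above passes a marker through the request context so that decomposedfs can distinguish a space-membership grant from a regular share grant; the AddGrant hunk further down in this diff checks `utils.SpaceGrant` for exactly that and only links a "share" space when the marker is absent. A minimal sketch of that context-marker pattern, with an illustrative key type standing in for the real `utils.SpaceGrant`:

```go
package main

import (
	"context"
	"fmt"
)

// spaceGrantKey is an illustrative stand-in for the utils.SpaceGrant context
// key used in this patch; the real key lives in github.com/cs3org/reva/pkg/utils.
type spaceGrantKey struct{}

// addGrant sketches the check decomposedfs.AddGrant performs: only grants that
// were NOT marked as space grants get an extra "share" space linked for the grantee.
func addGrant(ctx context.Context) {
	if ctx.Value(spaceGrantKey{}) == nil {
		fmt.Println("regular share grant: link a share space for the grantee")
		return
	}
	fmt.Println("space grant: member added to the space, no extra share space linked")
}

func main() {
	addGrant(context.Background()) // regular share
	addGrant(context.WithValue(context.Background(), spaceGrantKey{}, struct{}{})) // space grant
}
```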
commit d07d63e68a2559d1a576299de04287d2ba5069b4 Author: Andre Duffeck Date: Fri Jan 21 12:28:11 2022 +0100 Do not log nodes (#2463) * Do not log whole nodes It turns out that logging whole node objects is very expensive and also spams the logs quite a bit. Instead we just log the node ID now. * Add changelog commit c68b5cd81e76a1a4be6f3dc959ef5f6374d44682 Author: Willy Kloucek <34452982+wkloucek@users.noreply.github.com> Date: Fri Jan 21 09:19:24 2022 +0100 revert downgrade of mattn/go-sqlite (#2462) commit a799b5c15f8e98d3bc8ad7390a3fe80e58371dc8 Author: kobergj Date: Thu Jan 20 16:47:28 2022 +0100 Make gateway dumb again (#2437) * make StatHandler dumb again Signed-off-by: jkoberg * add changelog Signed-off-by: jkoberg * use findAndUnwrap instead find Signed-off-by: jkoberg * kinda fix integration tests Signed-off-by: jkoberg * remove ListContainer logic Signed-off-by: jkoberg * decomposedfs: don't check id's containing "/" Signed-off-by: jkoberg * fix linting and integration tests Signed-off-by: jkoberg * make ListRecycle dumb again Signed-off-by: jkoberg * make RestoreRecycleItem dumb again Signed-off-by: jkoberg * don't allow cross storage restore Signed-off-by: jkoberg * make Move dumb again Signed-off-by: jkoberg * make GetPath dumb again Signed-off-by: jkoberg * try changing dav report response Signed-off-by: jkoberg * add missing import Signed-off-by: jkoberg * blind mans fix for favorites Signed-off-by: jkoberg * remove commented code and nasty bug Signed-off-by: jkoberg * Update internal/http/services/owncloud/ocdav/propfind/propfind.go Co-authored-by: Jörn Friedrich Dreyer * Update internal/http/services/owncloud/ocdav/report.go Co-authored-by: Jörn Friedrich Dreyer * Update internal/http/services/owncloud/ocdav/report.go Co-authored-by: Jörn Friedrich Dreyer Co-authored-by: Jörn Friedrich Dreyer commit 0880fa870e77b31806125a4f52d7bc4421776a05 Author: David Christofas Date: Thu Jan 20 16:46:49 2022 +0100 prevent purging of enabled spaces (#2459) commit a955d6269826c09ee5205bda09e65e69449b57b4 Author: Jörn Friedrich Dreyer Date: Thu Jan 20 13:03:48 2022 +0100 decomposedfs: do not swallow errors when creating nodes (#2457) Signed-off-by: Jörn Friedrich Dreyer commit 4f8489635b4964ebb8005ec11ce3395f3aa16de2 Author: Michael Barz Date: Wed Jan 19 11:27:16 2022 +0100 fix path construction in webdav propfind (#2454) commit 53037a0313fffbe40ae818d6464f336e1aedbcb9 Author: Jörn Friedrich Dreyer Date: Tue Jan 18 11:41:40 2022 +0100 fix create space error message (#2452) Signed-off-by: Jörn Friedrich Dreyer commit bb960af361157f985542075d4801545f5fddfe27 Author: David Christofas Date: Mon Jan 17 17:43:34 2022 +0100 Purge spaces (#2431) * purge shares when purging storage spaces When purging a space we also want to delete all shares in that space. This is a first naive implementation for that but ideally we want to solve the with an event system eventually to decouple the services. * purge spaces in the storage driver Spaces can now be purged in a two step process. The code currently doesn't purge the blobs though. 
* implement review remarks * prevent normal users from listing deleted spaces * refactor share storage id filter * implement review remarks * list correct number of trashed spaces Signed-off-by: Jörn Friedrich Dreyer Co-authored-by: Jörn Friedrich Dreyer commit b061960108d7989a6d5dee647dc5f53db8388368 Author: kobergj Date: Fri Jan 14 11:48:03 2022 +0100 Fix publiclinks and decomposedfs (#2445) * decomposedfs: don't check id's containing "/" Signed-off-by: jkoberg * add changelog Signed-off-by: jkoberg commit c4d0c649aec2f4d6e058ca1be300fee0bf744897 Author: Swikriti Tripathi <41103328+SwikritiT@users.noreply.github.com> Date: Fri Jan 14 10:59:10 2022 +0545 [tests-only]Bump the commit id for tests edge (#2442) * Bump the commit id for tests * Adding failing tests to expected to failure commit c7397132f361d2fe3edc297422f1c8a08d822a05 Author: kobergj Date: Thu Jan 13 16:59:33 2022 +0100 fix statcache logic (#2440) Signed-off-by: jkoberg commit 1ed9c9f79af246ec500c3c14e6979ec974002840 Author: Jörn Friedrich Dreyer Date: Thu Jan 13 16:58:47 2022 +0100 ignore handled errors when creating spaces (#2439) Signed-off-by: Jörn Friedrich Dreyer commit 8a956f39f80c95f1f31519875d3ad6ea93d6489e Author: Ralf Haferkamp Date: Wed Jan 12 20:16:39 2022 +0100 Adjust "groupfilter" to be able to search by member name (#2436) Previously the input for the LDAP Groupfilter to lookup all groups a specific user is member of was the userpb.UserId part of the User object. I.e. it assumed we could run a single LDAP query to get all groups a user is member of by specifying the userid. However most LDAP Servers store the GroupMembership by either username (e.g. in memberUID Attribute) or by the user's DN (e.g. in member/uniqueMember). The GetUserGroups method was already updated recently to do a two-staged lookup (first lookup the user's name by Id then search the Groups by username). This change just removes the userpb.UserId template processing from the GroupFilter and replaces it with a single string (the username) to get rid of the annoying `{{.}}` template values in the config. In the future we should add a config switch to also allow lookups by member DN. commit 626d28adaa30f96836a531cf79a9b216f0f21caa Author: kobergj Date: Wed Jan 12 16:57:28 2022 +0100 [tests-only] Merge master into edge (#2435) * [Build-deps] Additional rules for CODEOWNERS (#2323) * Remove share refs from trashbin (#2298) * Public link propfind (#2315) * fix public share type in propfinds (#2316) * Bump core commit id for tests (#2331) * Revert "Fix content disposition (#2303)" (#2332) This reverts commit 3cba22371b78213f2e49197c2783220331a264bd. 
* [Build-deps]: Bump github.com/gomodule/redigo from 1.8.5 to 1.8.6 (#2326) * [Build-deps]: Bump github.com/mitchellh/mapstructure from 1.4.2 to 1.4.3 (#2324) * [Build-deps]: Bump github.com/aws/aws-sdk-go from 1.42.9 to 1.42.19 (#2325) * fix app provider new file action and improve app provider error codes (#2210) * Parse URL path to determine file name (#2346) * v1.17.0 * handle non existent spaces gracefully (#2354) * Bump core commit id for tests (#2365) * [Build-deps]: Bump github.com/minio/minio-go/v7 from 7.0.16 to 7.0.18 (#2363) * [Build-deps]: Bump github.com/ReneKroon/ttlcache/v2 from 2.9.0 to 2.10.0 (#2358) * [Build-deps]: Bump go.opentelemetry.io/otel/exporters/jaeger (#2362) * fix tests by pointing to the right owncloud/core commit id for tests (#2375) * add new file capabilties to ocs for the app provider (#2379) * Remove test from expected to fail and bump commit id (#2380) * add .drone.env to CODEOWNERS as it is part of the test files (#2378) * fix webdav copy for zero byte files (#2374) * Implement touch file (#2369) * implement cs3org/cs3apis#154 * use TouchFile for the app provider * add changelog and comments * revert use TouchFile in app provider * fix resource typo Co-authored-by: Giuseppe Lo Presti Co-authored-by: Giuseppe Lo Presti * Dummy implementation of the Lock CS3APIs (#2350) * allow new file create with app provider on public links (#2385) * Bump core commit id and use core master for tests (#2391) * Add product to ocs Version struct (#2397) The web ui will announce the backend version in the javascript console and is supposed to include the product name as well. The version seems to be a good location for the product field as it already includes the software edition as well. * bump core commit id for tests (#2404) * [Build-deps]: Bump github.com/mattn/go-sqlite3 from 1.14.9 to 1.14.10 (#2409) * [Build-deps]: Bump github.com/minio/minio-go/v7 from 7.0.18 to 7.0.20 (#2408) * [Build-deps]: Bump github.com/rs/cors from 1.8.0 to 1.8.2 (#2399) * [Build-deps]: Bump github.com/ReneKroon/ttlcache/v2 (#2387) * [Build-deps]: Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc (#2359) * [tests-only] format .drone.star (#2411) * update tus/tusd to version 1.8.0 (#2393) * Fixes for apps in public shares, project spaces for EOS driver (#2371) * [Build-deps]: Bump github.com/aws/aws-sdk-go from 1.42.19 to 1.42.27 (#2414) * [Build-deps]: Bump github.com/rs/zerolog from 1.26.0 to 1.26.1 (#2388) * update owncloud core commit id (#2418) * [Build-deps]: Bump github.com/mattn/go-sqlite3 (#2425) * [Build-deps]: Bump github.com/gomodule/redigo from 1.8.6 to 1.8.8 (#2426) * OIDC and WOPI changes for lightweight users (#2278) * don't create references in gateway Signed-off-by: jkoberg * don't run virtual views testsuite Signed-off-by: jkoberg * bring back token scope expanding Signed-off-by: jkoberg Co-authored-by: Giuseppe Lo Presti Co-authored-by: Gianmaria Del Monte <39946305+gmgigi96@users.noreply.github.com> Co-authored-by: David Christofas Co-authored-by: Swikriti Tripathi <41103328+SwikritiT@users.noreply.github.com> Co-authored-by: Ishank Arora Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Willy Kloucek <34452982+wkloucek@users.noreply.github.com> Co-authored-by: Michael Barz Co-authored-by: Phil Davis Co-authored-by: Benedikt Kulmann Co-authored-by: Saw-jan Gurung Co-authored-by: PKiran <39373750+kiranparajuli589@users.noreply.github.com> commit 6b8e690d4119546e80d9bc851dccf900ad9a8fdd Author: 
Andre Duffeck Date: Wed Jan 12 09:50:26 2022 +0100 Start splitting up ocdav (#2434) * Start splitting up ocdav into smaller chunks That increases clarity and allows for making things testable. * Add a basic propfind unit test * Fix linter and hound issues * Add changelog commit 9e2e91c6ca4eab7178a535998554dba2f5d66f56 Author: Jörn Friedrich Dreyer Date: Tue Jan 11 17:03:07 2022 +0100 fix shares provider filter (#2433) Signed-off-by: Jörn Friedrich Dreyer commit 5c6d9497ee647310e6e7a49e56f2abdecc6ded6b Author: Jörn Friedrich Dreyer Date: Tue Jan 11 16:03:47 2022 +0100 use space reference when listing containers (#2432) Signed-off-by: Jörn Friedrich Dreyer commit 2469457aaf3f69803df0d44a31c1350a22721efd Author: David Christofas Date: Wed Jan 12 17:15:05 2022 +0100 store nodes of a space inside of the space This is still work in progress. In this state the node_test.go run successfully. refactor lookup and Pathify Signed-off-by: Jörn Friedrich Dreyer add changelog Signed-off-by: Jörn Friedrich Dreyer add missing lookup package Signed-off-by: Jörn Friedrich Dreyer wipe spaces after tests Signed-off-by: Jörn Friedrich Dreyer fix tests Signed-off-by: Jörn Friedrich Dreyer drop lookup RootNode() Signed-off-by: Jörn Friedrich Dreyer make spaces litmus pick up correct id Signed-off-by: Jörn Friedrich Dreyer fix more tests Signed-off-by: Jörn Friedrich Dreyer cosmetic fixes Signed-off-by: Jörn Friedrich Dreyer fix lock tests Signed-off-by: Jörn Friedrich Dreyer fix fileid Signed-off-by: Jörn Friedrich Dreyer fix list recycle item Signed-off-by: Jörn Friedrich Dreyer fix trash depth infinity Signed-off-by: Jörn Friedrich Dreyer fix trash restore Signed-off-by: Jörn Friedrich Dreyer fix restore propagation Signed-off-by: Jörn Friedrich Dreyer linter happyness Signed-off-by: Jörn Friedrich Dreyer fix build Signed-off-by: Jörn Friedrich Dreyer fix restore child item Signed-off-by: Jörn Friedrich Dreyer update expected failures Signed-off-by: Jörn Friedrich Dreyer only root nodes may have an owner Signed-off-by: Jörn Friedrich Dreyer fix trash Signed-off-by: Jörn Friedrich Dreyer fix tree tests Signed-off-by: Jörn Friedrich Dreyer check CreateContainer permission when creating space Signed-off-by: Jörn Friedrich Dreyer add create-space permission check Signed-off-by: Jörn Friedrich Dreyer happy mocking Signed-off-by: Jörn Friedrich Dreyer add license header Signed-off-by: Jörn Friedrich Dreyer permissions demo: add create-space Signed-off-by: Jörn Friedrich Dreyer linter Signed-off-by: Jörn Friedrich Dreyer use dedicated permissionssvc config Signed-off-by: Jörn Friedrich Dreyer linter Signed-off-by: Jörn Friedrich Dreyer actually test nextcloud driver m( Signed-off-by: Jörn Friedrich Dreyer integration tests: use permissionssvc Signed-off-by: Jörn Friedrich Dreyer update integration tests Signed-off-by: Jörn Friedrich Dreyer integration tests Signed-off-by: Jörn Friedrich Dreyer fix storage integration ocis versions Signed-off-by: Jörn Friedrich Dreyer demo permissions: allow create space Signed-off-by: Jörn Friedrich Dreyer fix integration tests Signed-off-by: Jörn Friedrich Dreyer linter, remove dead code Signed-off-by: Jörn Friedrich Dreyer --- .drone.star | 8 +- .../decomposedfs-shard-nodes-per-space.md | 5 + pkg/permission/manager/demo/demo.go | 18 +- pkg/permission/permission.go | 7 + .../fs/nextcloud/nextcloud_server_mock.go | 4 +- .../utils/decomposedfs/decomposedfs.go | 125 +++---- pkg/storage/utils/decomposedfs/grants.go | 15 +- pkg/storage/utils/decomposedfs/grants_test.go | 7 +- 
.../utils/decomposedfs/{ => lookup}/lookup.go | 60 ++-- .../decomposedfs/{ => lookup}/lookup_test.go | 2 +- .../mocks/CS3PermissionsClient.go | 66 ++++ pkg/storage/utils/decomposedfs/node/locks.go | 3 +- .../utils/decomposedfs/node/locks_test.go | 6 +- pkg/storage/utils/decomposedfs/node/node.go | 291 +++++++++------ .../utils/decomposedfs/node/node_test.go | 14 +- .../utils/decomposedfs/node/permissions.go | 47 +-- .../utils/decomposedfs/options/options.go | 8 +- pkg/storage/utils/decomposedfs/recycle.go | 169 ++++----- .../utils/decomposedfs/recycle_test.go | 25 +- pkg/storage/utils/decomposedfs/revisions.go | 27 +- pkg/storage/utils/decomposedfs/spaces.go | 300 +++++++++------- .../utils/decomposedfs/testhelpers/helpers.go | 75 ++-- pkg/storage/utils/decomposedfs/tree/tree.go | 336 +++++++++--------- .../utils/decomposedfs/tree/tree_test.go | 19 +- pkg/storage/utils/decomposedfs/upload.go | 30 +- pkg/storage/utils/decomposedfs/upload_test.go | 34 +- .../expected-failures-on-OCIS-storage.md | 2 - .../expected-failures-on-S3NG-storage.md | 2 - .../grpc/fixtures/storageprovider-ocis.toml | 3 +- .../grpc/gateway_storageprovider_test.go | 40 +-- tests/integration/grpc/grpc_suite_test.go | 4 +- .../integration/grpc/storageprovider_test.go | 21 +- .../drone/storage-users-0-9.toml | 2 + .../drone/storage-users-a-f.toml | 2 + .../drone/storage-users-ocis.toml | 4 +- .../drone/storage-users-s3ng.toml | 2 + .../local/ldap-users.toml | 6 +- .../local/storage-users-0-9.toml | 2 + .../local/storage-users-a-f.toml | 2 + .../local/storage-users.toml | 3 +- 40 files changed, 941 insertions(+), 855 deletions(-) create mode 100644 changelog/unreleased/decomposedfs-shard-nodes-per-space.md rename pkg/storage/utils/decomposedfs/{ => lookup}/lookup.go (79%) rename pkg/storage/utils/decomposedfs/{ => lookup}/lookup_test.go (99%) create mode 100644 pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go diff --git a/.drone.star b/.drone.star index cbbe9330d3..5cd04df9f7 100644 --- a/.drone.star +++ b/.drone.star @@ -572,7 +572,7 @@ def virtualViews(): "PATH_TO_CORE": "/drone/src/tmp/testrunner", "TEST_SERVER_URL": "http://revad-services:20180", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "OCIS", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_REVA": "true", @@ -751,7 +751,7 @@ def litmusOcisSpacesDav(): "commands": [ # The spaceid is randomly generated during the first login so we need this hack to construct the correct url. 
"curl -s -k -u einstein:relativity -I http://revad-services:20080/remote.php/dav/files/einstein", - "export LITMUS_URL=http://revad-services:20080/remote.php/dav/spaces/$(ls /drone/src/tmp/reva/data/spaces/personal/)", + "export LITMUS_URL=http://revad-services:20080/remote.php/dav/spaces/$(ls /drone/src/tmp/reva/data/spacetypes/personal/)", "/usr/local/bin/litmus-wrapper", ], }, @@ -813,7 +813,7 @@ def ocisIntegrationTests(parallelRuns, skipExceptParts = []): "environment": { "TEST_SERVER_URL": "http://revad-services:20080", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spaces/*/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "OCIS", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_WITH_LDAP": "true", @@ -890,7 +890,7 @@ def s3ngIntegrationTests(parallelRuns, skipExceptParts = []): "environment": { "TEST_SERVER_URL": "http://revad-services:20080", "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spaces/*/*", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/spaces/* /drone/src/tmp/reva/data/blobs/* /drone/src/tmp/reva/data/spacetypes/*", "STORAGE_DRIVER": "S3NG", "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", "TEST_WITH_LDAP": "true", diff --git a/changelog/unreleased/decomposedfs-shard-nodes-per-space.md b/changelog/unreleased/decomposedfs-shard-nodes-per-space.md new file mode 100644 index 0000000000..be6f6ae86c --- /dev/null +++ b/changelog/unreleased/decomposedfs-shard-nodes-per-space.md @@ -0,0 +1,5 @@ +Change: shard nodes per space in decomposedfs + +The decomposedfs changas the on disk layout to shard nodes per space. + +https://github.com/cs3org/reva/pull/2554 diff --git a/pkg/permission/manager/demo/demo.go b/pkg/permission/manager/demo/demo.go index eb9da7544d..06a23ea6a0 100644 --- a/pkg/permission/manager/demo/demo.go +++ b/pkg/permission/manager/demo/demo.go @@ -36,8 +36,18 @@ func New(c map[string]interface{}) (permission.Manager, error) { type manager struct { } -func (m manager) CheckPermission(permission string, subject string, ref *provider.Reference) bool { - // We can currently return true all the time. - // Once we beginn testing roles we need to somehow check the roles of the users here - return false +func (m manager) CheckPermission(perm string, subject string, ref *provider.Reference) bool { + switch perm { + case permission.CreateSpace: + // TODO Users can only create their own personal space + // TODO guest accounts cannot create spaces + return true + case permission.ListAllSpaces: + // TODO introduce an admin role to allow listing all spaces + return false + default: + // We can currently return false all the time. 
+ // Once we beginn testing roles we need to somehow check the roles of the users here + return false + } } diff --git a/pkg/permission/permission.go b/pkg/permission/permission.go index e5e5c76a52..6aaa808845 100644 --- a/pkg/permission/permission.go +++ b/pkg/permission/permission.go @@ -22,6 +22,13 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ) +const ( + // ListAllSpaces is the hardcoded name for the list all spaces permission + ListAllSpaces string = "list-all-spaces" + // CreateSpace is the hardcoded name for the create space permission + CreateSpace string = "create-space" +) + // Manager defines the interface for the permission service driver type Manager interface { CheckPermission(permission string, subject string, ref *provider.Reference) bool diff --git a/pkg/storage/fs/nextcloud/nextcloud_server_mock.go b/pkg/storage/fs/nextcloud/nextcloud_server_mock.go index db514cbb5c..74cb43d2a2 100644 --- a/pkg/storage/fs/nextcloud/nextcloud_server_mock.go +++ b/pkg/storage/fs/nextcloud/nextcloud_server_mock.go @@ -66,6 +66,7 @@ var responses = map[string]Response{ `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateHome `: {200, ``, serverStateHome}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateHome {}`: {200, ``, serverStateHome}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateStorageSpace {"owner":{"id":{"idp":"0.0.0.0:19000","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","type":1},"username":"einstein"},"type":"personal","name":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"}`: {200, `{"status":{"code":1}}`, serverStateHome}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateStorageSpace {"owner":{"id":{"idp":"0.0.0.0:19000","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","type":1},"username":"einstein"},"type":"personal"}`: {200, `{"status":{"code":1}}`, serverStateHome}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/CreateReference {"path":"/Shares/reference","url":"scheme://target"}`: {200, `[]`, serverStateReference}, @@ -103,7 +104,8 @@ var responses = map[string]Response{ `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/GetPathByID {"storage_id":"00000000-0000-0000-0000-000000000000","opaque_id":"fileid-/some/path"} EMPTY`: {200, "/subdir", serverStateEmpty}, - `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"path":"/file"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"path":"/file"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, + `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/InitiateUpload {"ref":{"resource_id":{"storage_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"path":"/versionedFile"},"uploadLength":0,"metadata":{}}`: {200, `{"simple": "yes","tus": "yes"}`, serverStateEmpty}, `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListFolder {"ref":{"path":"/"},"mdKeys":null}`: {200, 
`[{"opaque":{},"type":2,"id":{"opaque_id":"fileid-/subdir"},"checksum":{},"etag":"deadbeef","mime_type":"text/plain","mtime":{"seconds":1234567890},"path":"/subdir","permission_set":{},"size":12345,"canonical_metadata":{},"owner":{"opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"arbitrary_metadata":{"metadata":{"da":"ta","some":"arbi","trary":"meta"}}}]`, serverStateEmpty}, diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 7c7c0dce3a..f474e1429e 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -19,6 +19,7 @@ package decomposedfs // go:generate mockery -name PermissionsChecker +// go:generate mockery -name CS3PermissionsClient // go:generate mockery -name Tree import ( @@ -33,15 +34,17 @@ import ( "strings" "syscall" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/sharedconf" + "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" @@ -51,6 +54,7 @@ import ( "github.com/cs3org/reva/pkg/utils" "github.com/pkg/errors" "go.opentelemetry.io/otel/codes" + "google.golang.org/grpc" ) // PermissionsChecker defines an interface for checking permissions on a Node @@ -59,9 +63,14 @@ type PermissionsChecker interface { HasPermission(ctx context.Context, n *node.Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) } +// CS3PermissionsClient defines an interface for checking permissions against the CS3 permissions service +type CS3PermissionsClient interface { + CheckPermission(ctx context.Context, in *cs3permissions.CheckPermissionRequest, opts ...grpc.CallOption) (*cs3permissions.CheckPermissionResponse, error) +} + // Tree is used to manage a tree hierarchy type Tree interface { - Setup(owner *userpb.UserId, propagateToRoot bool) error + Setup() error GetMD(ctx context.Context, node *node.Node) (os.FileInfo, error) ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error) @@ -82,11 +91,12 @@ type Tree interface { // Decomposedfs provides the base for decomposed filesystem implementations type Decomposedfs struct { - lu *Lookup - tp Tree - o *options.Options - p PermissionsChecker - chunkHandler *chunking.ChunkHandler + lu *lookup.Lookup + tp Tree + o *options.Options + p PermissionsChecker + chunkHandler *chunking.ChunkHandler + permissionsClient CS3PermissionsClient } // NewDefault returns an instance with default components @@ -96,30 +106,25 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore) (storage.FS, error) return nil, err } - lu := &Lookup{} + lu := &lookup.Lookup{} p := node.NewPermissions(lu) lu.Options = o tp := tree.New(o.Root, o.TreeTimeAccounting, o.TreeSizeAccounting, lu, bs) - o.GatewayAddr = sharedconf.GetGatewaySVC(o.GatewayAddr) - return New(o, lu, p, tp) -} + permissionsClient, err := 
pool.GetPermissionsClient(o.PermissionsSVC) + if err != nil { + return nil, err + } -// when enable home is false we want propagation to root if tree size or mtime accounting is enabled -func enablePropagationForRoot(o *options.Options) bool { - return (o.TreeSizeAccounting || o.TreeTimeAccounting) + return New(o, lu, p, tp, permissionsClient) } // New returns an implementation of the storage.FS interface that talks to // a local filesystem. -func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage.FS, error) { - err := tp.Setup(&userpb.UserId{ - OpaqueId: o.Owner, - Idp: o.OwnerIDP, - Type: userpb.UserType(userpb.UserType_value[o.OwnerType]), - }, enablePropagationForRoot(o)) +func New(o *options.Options, lu *lookup.Lookup, p PermissionsChecker, tp Tree, permissionsClient CS3PermissionsClient) (storage.FS, error) { + err := tp.Setup() if err != nil { logger.New().Error().Err(err). Msg("could not setup tree") @@ -127,11 +132,12 @@ func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage } return &Decomposedfs{ - tp: tp, - lu: lu, - o: o, - p: p, - chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + tp: tp, + lu: lu, + o: o, + p: p, + chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + permissionsClient: permissionsClient, }, nil } @@ -144,14 +150,12 @@ func (fs *Decomposedfs) Shutdown(ctx context.Context) error { // TODO Document in the cs3 should we return quota or free space? func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, err error) { var n *node.Node - if ref != nil { - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return 0, 0, err - } - } else { - if n, err = fs.lu.RootNode(ctx); err != nil { - return 0, 0, err - } + if ref == nil { + err = errtypes.BadRequest("no space given") + return 0, 0, err + } + if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return 0, 0, err } if !n.Exists { @@ -204,57 +208,18 @@ func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) { return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled") } - var n, h *node.Node - if n, err = fs.lu.RootNode(ctx); err != nil { - return - } - h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), false, func(ctx context.Context, n *node.Node) error { - if !n.Exists { - if err := fs.tp.CreateDir(ctx, n); err != nil { - return err - } - } - return nil + u := ctxpkg.ContextMustGetUser(ctx) + res, err := fs.CreateStorageSpace(ctx, &provider.CreateStorageSpaceRequest{ + Type: spaceTypePersonal, + Owner: u, }) - - // make sure to delete the created directory if things go wrong - defer func() { - if err != nil { - // do not catch the error to not shadow the original error - if tmpErr := fs.tp.Delete(ctx, n); tmpErr != nil { - appctx.GetLogger(ctx).Error().Err(tmpErr).Msg("Can not revert file system change after error") - } - } - }() - if err != nil { - return - } - - // update the owner - u := ctxpkg.ContextMustGetUser(ctx) - if err = h.WriteAllNodeMetadata(u.Id); err != nil { - return - } - - if fs.o.TreeTimeAccounting || fs.o.TreeSizeAccounting { - // mark the home node as the end of propagation - if err = h.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") - return - } - } - - if err := h.SetMetadata(xattrs.SpaceNameAttr, u.DisplayName); err != nil { return err } - - // add storage space - if 
err := fs.createStorageSpace(ctx, spaceTypePersonal, h.ID); err != nil { - return err + if res.Status.Code != rpcv1beta1.Code_CODE_OK { + return errtypes.NewErrtypeFromStatus(res.Status) } - - return + return nil } // The os not exists error is buried inside the xattr error, diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 923a4478a1..40e6199500 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -56,16 +56,12 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g return err } - owner, err := node.Owner() - if err != nil { - return err - } - + owner := node.Owner() // If the owner is empty and there are no grantees then we are dealing with a just created project space. // In this case we don't need to check for permissions and just add the grant since this will be the project // manager. // When the owner is empty but grants are set then we do want to check the grants. - if !(len(grantees) == 0 && owner.OpaqueId == "") { + if !(len(grantees) == 0 && (owner == nil || owner.OpaqueId == "")) { ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 return rp.AddGrant || rp.UpdateGrant @@ -91,7 +87,7 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g // when a grant is added to a space, do not add a new space under "shares" if spaceGrant := ctx.Value(utils.SpaceGrant); spaceGrant == nil { - err := fs.createStorageSpace(ctx, spaceTypeShare, node.ID) + err := fs.linkStorageSpaceType(ctx, spaceTypeShare, node.ID) if err != nil { return err } @@ -122,7 +118,7 @@ func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) } log := appctx.GetLogger(ctx) - np := fs.lu.InternalPath(node.ID) + np := node.InternalPath() var attrs []string if attrs, err = xattr.List(np); err != nil { log.Error().Err(err).Msg("error listing attributes") @@ -174,8 +170,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference attr = xattrs.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId } - np := fs.lu.InternalPath(node.ID) - if err = xattr.Remove(np, attr); err != nil { + if err = xattr.Remove(node.InternalPath(), attr); err != nil { return } diff --git a/pkg/storage/utils/decomposedfs/grants_test.go b/pkg/storage/utils/decomposedfs/grants_test.go index 4b28c740e0..b77bb49985 100644 --- a/pkg/storage/utils/decomposedfs/grants_test.go +++ b/pkg/storage/utils/decomposedfs/grants_test.go @@ -21,7 +21,6 @@ package decomposedfs_test import ( "io/fs" "os" - "path" "path/filepath" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" @@ -115,7 +114,7 @@ var _ = Describe("Grants", func() { err = env.Fs.AddGrant(env.Ctx, ref, grant) Expect(err).ToNot(HaveOccurred()) - localPath := path.Join(env.Root, "nodes", n.ID) + localPath := n.InternalPath() attr, err := xattr.Get(localPath, xattrs.GrantUserAcePrefix+grant.Grantee.GetUserId().OpaqueId) Expect(err).ToNot(HaveOccurred()) Expect(string(attr)).To(Equal("\x00t=A:f=:p=rw")) @@ -125,8 +124,8 @@ var _ = Describe("Grants", func() { err := env.Fs.AddGrant(env.Ctx, ref, grant) Expect(err).ToNot(HaveOccurred()) - spacesPath := filepath.Join(env.Root, "spaces") - tfs.root = spacesPath + spaceTypesPath := filepath.Join(env.Root, "spacetypes") + tfs.root = spaceTypesPath entries, err := fs.ReadDir(tfs, "share") 
Expect(err).ToNot(HaveOccurred()) Expect(len(entries)).To(BeNumerically(">=", 1)) diff --git a/pkg/storage/utils/decomposedfs/lookup.go b/pkg/storage/utils/decomposedfs/lookup/lookup.go similarity index 79% rename from pkg/storage/utils/decomposedfs/lookup.go rename to pkg/storage/utils/decomposedfs/lookup/lookup.go index 06017e96a2..38032a1acd 100644 --- a/pkg/storage/utils/decomposedfs/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup.go @@ -16,23 +16,20 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package decomposedfs +package lookup import ( "context" "fmt" - "os" "path/filepath" "strings" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/storage/utils/templates" ) // Lookup implements transformations from filepath to node and back @@ -58,6 +55,7 @@ func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) if err != nil { return nil, err } + n.SpaceID = ref.ResourceId.StorageId } } return n, nil @@ -76,32 +74,28 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n // The Resource references the root of a space return lu.NodeFromSpaceID(ctx, id) } - n, err = node.ReadNode(ctx, lu, id.OpaqueId) - if err != nil { - return nil, err - } + return node.ReadNode(ctx, lu, id.StorageId, id.OpaqueId) +} - return n, n.FindStorageSpaceRoot() +// Pathify segments the beginning of a string into depth segments of width length +// Pathify("aabbccdd", 3, 1) will return "a/a/b/bccdd" +func Pathify(id string, depth, width int) string { + b := strings.Builder{} + i := 0 + for ; i < depth; i++ { + if len(id) <= i*width+width { + break + } + b.WriteString(id[i*width : i*width+width]) + b.WriteRune(filepath.Separator) + } + b.WriteString(id[i*width:]) + return b.String() } // NodeFromSpaceID converts a resource id without an opaque id into a Node func (lu *Lookup) NodeFromSpaceID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) { - d := filepath.Join(lu.Options.Root, "spaces", spaceTypeAny, id.StorageId) - matches, err := filepath.Glob(d) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return nil, fmt.Errorf("can't determine node from spaceID: found %d matching spaces. Path: %s", len(matches), d) - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - node, err := node.ReadNode(ctx, lu, filepath.Base(target)) + node, err := node.ReadNode(ctx, lu, id.StorageId, id.StorageId) if err != nil { return nil, err } @@ -128,13 +122,6 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node) (p string, err error) return } -// RootNode returns the root node of the storage -func (lu *Lookup) RootNode(ctx context.Context) (*node.Node, error) { - n := node.New(node.RootID, "", "", 0, "", nil, lu) - n.Exists = true - return n, nil -} - // WalkPath calls n.Child(segment) on every path segment in p starting at the node r. // If a function f is given it will be executed for every segment node, but not the root node r. 
// If followReferences is given the current visited reference node is replaced by the referenced node. @@ -182,13 +169,8 @@ func (lu *Lookup) InternalRoot() string { } // InternalPath returns the internal path for a given ID -func (lu *Lookup) InternalPath(id string) string { - return filepath.Join(lu.Options.Root, "nodes", id) -} - -func (lu *Lookup) mustGetUserLayout(ctx context.Context) string { - u := ctxpkg.ContextMustGetUser(ctx) - return templates.WithUser(u, lu.Options.UserLayout) +func (lu *Lookup) InternalPath(spaceID, nodeID string) string { + return filepath.Join(lu.Options.Root, "spaces", Pathify(spaceID, 1, 2), "nodes", Pathify(nodeID, 4, 2)) } // ShareFolder returns the internal storage root directory diff --git a/pkg/storage/utils/decomposedfs/lookup_test.go b/pkg/storage/utils/decomposedfs/lookup/lookup_test.go similarity index 99% rename from pkg/storage/utils/decomposedfs/lookup_test.go rename to pkg/storage/utils/decomposedfs/lookup/lookup_test.go index 2895445383..4faf6f58ac 100644 --- a/pkg/storage/utils/decomposedfs/lookup_test.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup_test.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package decomposedfs_test +package lookup_test import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" diff --git a/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go b/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go new file mode 100644 index 0000000000..c63573fce8 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/mocks/CS3PermissionsClient.go @@ -0,0 +1,66 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Code generated by mockery v1.1.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + permissionsv1beta1 "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" +) + +// CS3PermissionsClient is an autogenerated mock type for the CS3PermissionsClient type +type CS3PermissionsClient struct { + mock.Mock +} + +// CheckPermission provides a mock function with given fields: ctx, in, opts +func (_m *CS3PermissionsClient) CheckPermission(ctx context.Context, in *permissionsv1beta1.CheckPermissionRequest, opts ...grpc.CallOption) (*permissionsv1beta1.CheckPermissionResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + var r0 *permissionsv1beta1.CheckPermissionResponse + if rf, ok := ret.Get(0).(func(context.Context, *permissionsv1beta1.CheckPermissionRequest, ...grpc.CallOption) *permissionsv1beta1.CheckPermissionResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*permissionsv1beta1.CheckPermissionResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *permissionsv1beta1.CheckPermissionRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/storage/utils/decomposedfs/node/locks.go b/pkg/storage/utils/decomposedfs/node/locks.go index e0392364dd..4b882e1ef5 100644 --- a/pkg/storage/utils/decomposedfs/node/locks.go +++ b/pkg/storage/utils/decomposedfs/node/locks.go @@ -35,6 +35,7 @@ import ( // SetLock sets a lock on the node func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { + nodepath := n.LockFilePath() // check existing lock if l, _ := n.ReadLock(ctx); l != nil { @@ -65,7 +66,7 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { }() // O_EXCL to make open fail when the file already exists - f, err := os.OpenFile(n.LockFilePath(), os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) + f, err := os.OpenFile(nodepath, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { return errors.Wrap(err, "Decomposedfs: could not create lock file") } diff --git a/pkg/storage/utils/decomposedfs/node/locks_test.go b/pkg/storage/utils/decomposedfs/node/locks_test.go index d86c677ad2..5cd4dd0ad5 100644 --- a/pkg/storage/utils/decomposedfs/node/locks_test.go +++ b/pkg/storage/utils/decomposedfs/node/locks_test.go @@ -45,7 +45,7 @@ var _ = Describe("Node locks", func() { otherUser = &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "foo", + OpaqueId: "otheruserid", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "foo", @@ -68,8 +68,8 @@ var _ = Describe("Node locks", func() { User: env.Owner.Id, LockId: uuid.New().String(), } - n = node.New("tobelockedid", "", "tobelocked", 10, "", env.Owner.Id, env.Lookup) - n2 = node.New("neverlockedid", "", "neverlocked", 10, "", env.Owner.Id, env.Lookup) + n = node.New("u-s-e-r-id", "tobelockedid", "", "tobelocked", 10, "", env.Owner.Id, env.Lookup) + n2 = node.New("u-s-e-r-id", "neverlockedid", "", "neverlocked", 10, "", env.Owner.Id, env.Lookup) }) AfterEach(func() { diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index 519c8e31d9..56ee9dc3ec 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -63,7 +63,8 @@ const ( QuotaUnlimited = "-3" // TrashIDDelimiter represents the characters used to separate the nodeid and the deletion time. - TrashIDDelimiter = ".T." + TrashIDDelimiter = ".T." + RevisionIDDelimiter = ".REV." 
// RootID defines the root node's ID RootID = "root" @@ -71,6 +72,7 @@ const ( // Node represents a node in the tree and provides methods to get a Parent or Child instance type Node struct { + SpaceID string ParentID string ID string Name string @@ -85,20 +87,19 @@ type Node struct { // PathLookup defines the interface for the lookup component type PathLookup interface { - RootNode(ctx context.Context) (node *Node, err error) - InternalRoot() string - InternalPath(ID string) string + InternalPath(spaceID, nodeID string) string Path(ctx context.Context, n *Node) (path string, err error) ShareFolder() string } // New returns a new instance of Node -func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { +func New(spaceID, id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { if blobID == "" { blobID = uuid.New().String() } return &Node{ + SpaceID: spaceID, ID: id, ParentID: parentID, Name: name, @@ -111,14 +112,14 @@ func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb // ChangeOwner sets the owner of n to newOwner func (n *Node) ChangeOwner(new *userpb.UserId) (err error) { - nodePath := n.InternalPath() - n.owner = new + rootNodePath := n.SpaceRoot.InternalPath() + n.SpaceRoot.owner = new var attribs = map[string]string{xattrs.OwnerIDAttr: new.OpaqueId, xattrs.OwnerIDPAttr: new.Idp, xattrs.OwnerTypeAttr: utils.UserTypeToString(new.Type)} - if err := xattrs.SetMultiple(nodePath, attribs); err != nil { + if err := xattrs.SetMultiple(rootNodePath, attribs); err != nil { return err } @@ -135,6 +136,18 @@ func (n *Node) SetMetadata(key string, val string) (err error) { return nil } +// RemoveMetadata removes a given key +func (n *Node) RemoveMetadata(key string) (err error) { + if err = xattr.Remove(n.InternalPath(), key); err != nil { + if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || + // darwin + e.Err.Error() == "attribute not found") { + return nil + } + } + return err +} + // GetMetadata reads the metadata for the given key func (n *Node) GetMetadata(key string) (val string, err error) { nodePath := n.InternalPath() @@ -145,7 +158,7 @@ func (n *Node) GetMetadata(key string) (val string, err error) { } // WriteAllNodeMetadata writes the Node metadata to disk -func (n *Node) WriteAllNodeMetadata(owner *userpb.UserId) (err error) { +func (n *Node) WriteAllNodeMetadata() (err error) { attribs := make(map[string]string) attribs[xattrs.ParentidAttr] = n.ParentID @@ -154,54 +167,70 @@ func (n *Node) WriteAllNodeMetadata(owner *userpb.UserId) (err error) { attribs[xattrs.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) nodePath := n.InternalPath() - attribs[xattrs.OwnerIDAttr] = "" - attribs[xattrs.OwnerIDPAttr] = "" - attribs[xattrs.OwnerTypeAttr] = "" + return xattrs.SetMultiple(nodePath, attribs) +} - if owner != nil { - attribs[xattrs.OwnerIDAttr] = owner.OpaqueId - attribs[xattrs.OwnerIDPAttr] = owner.Idp - attribs[xattrs.OwnerTypeAttr] = utils.UserTypeToString(owner.Type) +// WriteOwner writes the space owner +func (n *Node) WriteOwner(owner *userpb.UserId) error { + n.SpaceRoot.owner = owner + attribs := map[string]string{ + xattrs.OwnerIDAttr: owner.OpaqueId, + xattrs.OwnerIDPAttr: owner.Idp, + xattrs.OwnerTypeAttr: utils.UserTypeToString(owner.Type), } - if err := xattrs.SetMultiple(nodePath, attribs); err != nil { + nodeRootPath := n.SpaceRoot.InternalPath() + if err := xattrs.SetMultiple(nodeRootPath, attribs); err != nil { 
return err } - return + n.SpaceRoot.owner = owner + return nil } // ReadNode creates a new instance from an id and checks if it exists -func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error) { - n = &Node{ - lu: lu, - ID: id, - } - - nodePath := n.InternalPath() +func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *Node, err error) { - // lookup parent id in extended attributes - var attr string - attr, err = xattrs.Get(nodePath, xattrs.ParentidAttr) + // read space root + r := &Node{ + SpaceID: spaceID, + lu: lu, + ID: spaceID, + } + r.SpaceRoot = r + r.owner, err = r.readOwner() switch { - case err == nil: - n.ParentID = attr - case xattrs.IsAttrUnset(err): - return nil, errtypes.InternalError(err.Error()) case xattrs.IsNotExist(err): - return n, nil // swallow not found, the node defaults to exists = false - default: - return nil, errtypes.InternalError(err.Error()) + // this returns tr + return r, nil + case err != nil: + return nil, err } + r.Exists = true // check if this is a space root - if _, err = xattrs.Get(nodePath, xattrs.SpaceNameAttr); err == nil { - n.SpaceRoot = n + if spaceID == nodeID { + return r, nil + } + + // read node + n = &Node{ + SpaceID: spaceID, + lu: lu, + ID: nodeID, + SpaceRoot: r, } + + nodePath := n.InternalPath() + + var attr string // lookup name in extended attributes if attr, err = xattrs.Get(nodePath, xattrs.NameAttr); err == nil { n.Name = attr } else { return } + + n.Exists = true + // lookup blobID in extended attributes if attr, err = xattrs.Get(nodePath, xattrs.BlobIDAttr); err == nil { n.BlobID = attr @@ -216,15 +245,48 @@ func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error return } - // Check if parent exists. Otherwise this node is part of a deleted subtree - _, err = os.Stat(lu.InternalPath(n.ParentID)) + // lookup parent id in extended attributes + attr, err = xattrs.Get(nodePath, xattrs.ParentidAttr) + switch { + case err == nil: + n.ParentID = attr + case xattrs.IsAttrUnset(err): + return nil, errtypes.InternalError(err.Error()) + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + default: + return nil, errtypes.InternalError(err.Error()) + } + + // TODO why do we stat the parent? to determine if the current node is in the trash we would need to traverse all parents... + // we need to traverse all parents for permissions anyway ... + // - we can compare to space root owner with the current user + // - we can compare the share permissions on the root for spaces, which would work for managers + // - for non managers / owners we need to traverse all path segments because an intermediate node might have been shared + // - if we want to support negative acls we need to traverse the path for all users (but the owner) + // for trashed items we need to check all parents + // - one of them might have the trash suffix ... + // - options: + // - move deleted nodes in a trash folder that is still part of the tree (aka freedesktop org trash spec) + // - shares should still be removed, which requires traversing all trashed children ... and it should be undoable ... + // - what if a trashed file is restored? will child items be accessible by a share? + // - compare paths of trash root items and the trashed file? + // - to determine the relative path of a file we would need to traverse all intermediate nodes anyway + // - recursively mark all children as trashed ... async ... 
it is ok when that is not synchronous + // - how do we pick up if an error occurs? write a journal somewhere? activity log / delta? + // - stat requests will not pick up trashed items at all + // - recursively move all children into the trash folder? + // - no need to write an additional trash entry + // - can be made more robust with a journal + // - same recursion mechanism can be used to purge items? sth we still need to do + _, err = os.Stat(n.ParentInternalPath()) if err != nil { if os.IsNotExist(err) { return nil, errtypes.NotFound(err.Error()) } return nil, err } - n.Exists = true + return } @@ -238,12 +300,30 @@ func isNotDir(err error) bool { return false } +func readChildNodeFromLink(path string) (string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", err + } + nodeID := strings.TrimLeft(link, "/.") + nodeID = strings.ReplaceAll(nodeID, "/", "") + return nodeID, nil +} + // Child returns the child node with the given name func (n *Node) Child(ctx context.Context, name string) (*Node, error) { - link, err := os.Readlink(filepath.Join(n.InternalPath(), filepath.Join("/", name))) + spaceID := n.SpaceID + if spaceID == "" && n.ParentID == "root" { + spaceID = n.ID + } else if n.SpaceRoot != nil { + spaceID = n.SpaceRoot.ID + } + nodeID, err := readChildNodeFromLink(filepath.Join(n.InternalPath(), name)) if err != nil { if os.IsNotExist(err) || isNotDir(err) { + c := &Node{ + SpaceID: spaceID, lu: n.lu, ParentID: n.ID, Name: name, @@ -256,15 +336,11 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) { } var c *Node - if strings.HasPrefix(link, "../") { - c, err = ReadNode(ctx, n.lu, filepath.Base(link)) - if err != nil { - return nil, errors.Wrap(err, "could not read child node") - } - c.SpaceRoot = n.SpaceRoot - } else { - return nil, fmt.Errorf("decomposedfs: expected '../ prefix, got' %+v", link) + c, err = ReadNode(ctx, n.lu, spaceID, nodeID) + if err != nil { + return nil, errors.Wrap(err, "could not read child node") } + c.SpaceRoot = n.SpaceRoot return c, nil } @@ -275,12 +351,14 @@ func (n *Node) Parent() (p *Node, err error) { return nil, fmt.Errorf("decomposedfs: root has no parent") } p = &Node{ + SpaceID: n.SpaceID, lu: n.lu, ID: n.ParentID, SpaceRoot: n.SpaceRoot, } - parentPath := n.lu.InternalPath(n.ParentID) + // parentPath := n.lu.InternalPath(spaceID, n.ParentID) + parentPath := p.InternalPath() // lookup parent id in extended attributes if p.ParentID, err = xattrs.Get(parentPath, xattrs.ParentidAttr); err != nil { @@ -301,59 +379,59 @@ func (n *Node) Parent() (p *Node, err error) { return } -// Owner returns the cached owner id or reads it from the extended attributes -// TODO can be private as only the AsResourceInfo uses it -func (n *Node) Owner() (*userpb.UserId, error) { - if n.owner != nil { - return n.owner, nil - } +// Owner returns the space owner +func (n *Node) Owner() *userpb.UserId { + return n.SpaceRoot.owner +} + +// readOwner reads the owner from the extended attributes of the space root +// in case either owner id or owner idp are unset we return an error and an empty owner object +func (n *Node) readOwner() (*userpb.UserId, error) { owner := &userpb.UserId{} - // FIXME ... do we return the owner of the reference or the owner of the target? - // we don't really know the owner of the target ... and as the reference may point anywhere we cannot really find out - // but what are the permissions? all? none? the gateway has to fill in? - // TODO what if this is a reference? 
- nodePath := n.InternalPath() + rootNodePath := n.SpaceRoot.InternalPath() // lookup parent id in extended attributes var attr string var err error // lookup ID in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerIDAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerIDAttr) switch { case err == nil: owner.OpaqueId = attr - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: return nil, err } // lookup IDP in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerIDPAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerIDPAttr) switch { case err == nil: owner.Idp = attr - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: return nil, err } // lookup type in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.OwnerTypeAttr) + attr, err = xattrs.Get(rootNodePath, xattrs.OwnerTypeAttr) switch { case err == nil: owner.Type = utils.UserTypeMap(attr) - case xattrs.IsAttrUnset(err), xattrs.IsNotExist(err): - fallthrough + case xattrs.IsAttrUnset(err): + // ignore default: - // TODO the user type defaults to invalid, which is the case - err = nil + return nil, err } - n.owner = owner - return n.owner, err + // owner is an optional property + if owner.Idp == "" && owner.OpaqueId == "" { + return nil, nil + } + return owner, nil } // PermissionSet returns the permission set for the current user @@ -364,7 +442,7 @@ func (n *Node) PermissionSet(ctx context.Context) provider.ResourcePermissions { appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") return NoPermissions() } - if o, _ := n.Owner(); utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.SpaceRoot.Owner()) { return OwnerPermissions() } // read the permissions for the current user from the acls of the current node @@ -376,12 +454,17 @@ func (n *Node) PermissionSet(ctx context.Context) provider.ResourcePermissions { // InternalPath returns the internal path of the Node func (n *Node) InternalPath() string { - return n.lu.InternalPath(n.ID) + return n.lu.InternalPath(n.SpaceID, n.ID) +} + +// ParentInternalPath returns the internal path of the parent of the current node +func (n *Node) ParentInternalPath() string { + return n.lu.InternalPath(n.SpaceID, n.ParentID) } // LockFilePath returns the internal path of the lock file of the node func (n *Node) LockFilePath() string { - return n.lu.InternalPath(n.ID) + ".lock" + return n.InternalPath() + ".lock" } // CalculateEtag returns a hash of fileid + tmtime (or mtime) @@ -409,7 +492,7 @@ func calculateEtag(nodeID string, tmTime time.Time) (string, error) { func (n *Node) SetMtime(ctx context.Context, mtime string) error { sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() if mt, err := parseMTime(mtime); err == nil { - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() // updating mtime also updates atime if err := os.Chtimes(nodePath, mt, mt); err != nil { sublog.Error().Err(err). 
@@ -429,7 +512,7 @@ func (n *Node) SetMtime(ctx context.Context, mtime string) error { // SetEtag sets the temporary etag of a node if it differs from the current etag func (n *Node) SetEtag(ctx context.Context, val string) (err error) { sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() var tmTime time.Time if tmTime, err = n.GetTMTime(); err != nil { // no tmtime, use mtime @@ -474,7 +557,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem // public tags can be mapped to extended attributes func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() // the favorite flag is specific to the user, so we need to incorporate the userid fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) return xattrs.Set(nodePath, fa, val) @@ -485,7 +568,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi sublog := appctx.GetLogger(ctx).With().Interface("node", n.ID).Logger() var fn string - nodePath := n.lu.InternalPath(n.ID) + nodePath := n.InternalPath() var fi os.FileInfo @@ -510,8 +593,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE } - // TODO ensure we always have a space root - id := &provider.ResourceId{StorageId: n.SpaceRoot.Name, OpaqueId: n.ID} + id := &provider.ResourceId{StorageId: n.SpaceID, OpaqueId: n.ID} if returnBasename { fn = n.Name @@ -530,6 +612,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi Size: uint64(n.Blobsize), Target: target, PermissionSet: rp, + Owner: n.Owner(), } if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { @@ -542,10 +625,6 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } } - if ri.Owner, err = n.Owner(); err != nil { - sublog.Debug().Err(err).Msg("could not determine owner") - } - // TODO make etag of files use fileid and checksum var tmTime time.Time @@ -628,19 +707,8 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } // quota if _, ok := mdKeysMap[QuotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { - var quotaPath string - if n.SpaceRoot == nil { - root, err := n.lu.RootNode(ctx) - if err == nil { - quotaPath = root.InternalPath() - } else { - sublog.Debug().Err(err).Msg("error determining the space root node for quota") - } - } else { - quotaPath = n.SpaceRoot.InternalPath() - } - if quotaPath != "" { - readQuotaIntoOpaque(ctx, quotaPath, ri) + if n.SpaceRoot != nil && n.SpaceRoot.InternalPath() != "" { + readQuotaIntoOpaque(ctx, n.SpaceRoot.InternalPath(), ri) } } @@ -752,7 +820,7 @@ func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.Reso // HasPropagation checks if the propagation attribute exists and is set to "1" func (n *Node) HasPropagation() (propagation bool) { - if b, err := xattrs.Get(n.lu.InternalPath(n.ID), xattrs.PropagationAttr); err == nil { + if b, err := xattrs.Get(n.InternalPath(), xattrs.PropagationAttr); err == nil { return b == "1" } return false @@ -761,7 +829,7 @@ func (n *Node) HasPropagation() (propagation bool) { // GetTMTime reads the tmtime from the extended 
attributes func (n *Node) GetTMTime() (tmTime time.Time, err error) { var b string - if b, err = xattrs.Get(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr); err != nil { + if b, err = xattrs.Get(n.InternalPath(), xattrs.TreeMTimeAttr); err != nil { return } return time.Parse(time.RFC3339Nano, b) @@ -769,7 +837,7 @@ func (n *Node) GetTMTime() (tmTime time.Time, err error) { // SetTMTime writes the tmtime to the extended attributes func (n *Node) SetTMTime(t time.Time) (err error) { - return xattrs.Set(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) + return xattrs.Set(n.InternalPath(), xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) } // GetTreeSize reads the treesize from the extended attributes @@ -793,7 +861,7 @@ func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { // UnsetTempEtag removes the temporary etag attribute func (n *Node) UnsetTempEtag() (err error) { - if err = xattr.Remove(n.lu.InternalPath(n.ID), xattrs.TmpEtagAttr); err != nil { + if err = xattr.Remove(n.InternalPath(), xattrs.TmpEtagAttr); err != nil { if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || // darwin e.Err.Error() == "attribute not found") { @@ -806,20 +874,7 @@ func (n *Node) UnsetTempEtag() (err error) { // ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap provider.ResourcePermissions, err error) { // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. 
the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID == RootID { - return NoOwnerPermissions(), nil - } - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { appctx.GetLogger(ctx).Debug().Str("node", n.ID).Msg("user is owner, returning owner permissions") return OwnerPermissions(), nil } diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go index d1db620247..d8befdc85e 100644 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -22,7 +22,6 @@ import ( "encoding/json" "time" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" @@ -56,8 +55,8 @@ var _ = Describe("Node", func() { Describe("New", func() { It("generates unique blob ids if none are given", func() { - n1 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) - n2 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) + n1 := node.New(env.SpaceRootRes.StorageId, id, "", name, 10, "", env.Owner.Id, env.Lookup) + n2 := node.New(env.SpaceRootRes.StorageId, id, "", name, 10, "", env.Owner.Id, env.Lookup) Expect(len(n1.BlobID)).To(Equal(36)) Expect(n1.BlobID).ToNot(Equal(n2.BlobID)) @@ -72,7 +71,7 @@ var _ = Describe("Node", func() { }) Expect(err).ToNot(HaveOccurred()) - n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.ID) + n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.SpaceID, lookupNode.ID) Expect(err).ToNot(HaveOccurred()) Expect(n.BlobID).To(Equal("file1-blobid")) }) @@ -91,13 +90,8 @@ var _ = Describe("Node", func() { n.Name = "TestName" n.BlobID = "TestBlobID" n.Blobsize = int64(blobsize) - owner := &userpb.UserId{ - Idp: "testidp", - OpaqueId: "testuserid", - Type: userpb.UserType_USER_TYPE_PRIMARY, - } - err = n.WriteAllNodeMetadata(owner) + err = n.WriteAllNodeMetadata() Expect(err).ToNot(HaveOccurred()) n2, err := env.Lookup.NodeFromResource(env.Ctx, ref) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/storage/utils/decomposedfs/node/permissions.go b/pkg/storage/utils/decomposedfs/node/permissions.go index ec0f8c8789..b70124c74e 100644 --- a/pkg/storage/utils/decomposedfs/node/permissions.go +++ b/pkg/storage/utils/decomposedfs/node/permissions.go @@ -100,21 +100,9 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov appctx.GetLogger(ctx).Debug().Interface("node", n.ID).Msg("no user in context, returning default permissions") return NoPermissions(), nil } + // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n.ID).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. 
the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID == RootID { - return NoOwnerPermissions(), nil - } - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { lp, err := n.lu.Path(ctx, n) if err == nil && lp == n.lu.ShareFolder() { return ShareFolderPermissions(), nil @@ -123,10 +111,8 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov return OwnerPermissions(), nil } // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return NoPermissions(), err - } + + rn := n.SpaceRoot cn := n @@ -189,9 +175,11 @@ func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*pr } // determine root - if err = n.FindStorageSpaceRoot(); err != nil { - return false, err - } + /* + if err = n.FindStorageSpaceRoot(); err != nil { + return false, err + } + */ // for an efficient group lookup convert the list of groups to a map // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! @@ -271,22 +259,7 @@ func (p *Permissions) getUserAndPermissions(ctx context.Context, n *Node) (*user return nil, &perms } // check if the current user is the owner - o, err := n.Owner() - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n.ID).Msg("could not determine owner, returning default permissions") - perms := NoPermissions() - return nil, &perms - } - if o.OpaqueId == "" { - // this happens for root nodes and project spaces in the storage. the extended attributes are set to emptystring to indicate: no owner - // for project spaces we need to go over the grants and check the granted permissions - if n.ID != RootID { - return u, nil - } - perms := NoOwnerPermissions() - return nil, &perms - } - if utils.UserEqual(u.Id, o) { + if utils.UserEqual(u.Id, n.Owner()) { appctx.GetLogger(ctx).Debug().Str("node", n.ID).Msg("user is owner, returning owner permissions") perms := OwnerPermissions() return u, &perms diff --git a/pkg/storage/utils/decomposedfs/options/options.go b/pkg/storage/utils/decomposedfs/options/options.go index 846cc03913..765c0c0c53 100644 --- a/pkg/storage/utils/decomposedfs/options/options.go +++ b/pkg/storage/utils/decomposedfs/options/options.go @@ -46,12 +46,8 @@ type Options struct { // propagate size changes as treesize TreeSizeAccounting bool `mapstructure:"treesize_accounting"` - // set an owner for the root node - Owner string `mapstructure:"owner"` - OwnerIDP string `mapstructure:"owner_idp"` - OwnerType string `mapstructure:"owner_type"` - - GatewayAddr string `mapstructure:"gateway_addr"` + // permissions service to use when checking permissions + PermissionsSVC string `mapstructure:"permissionssvc"` } // New returns a new Options instance for the given configuration diff --git a/pkg/storage/utils/decomposedfs/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go index 4670e8281b..2be8e13695 100644 --- a/pkg/storage/utils/decomposedfs/recycle.go +++ b/pkg/storage/utils/decomposedfs/recycle.go @@ -20,8 +20,8 @@ package decomposedfs import ( "context" + "io/fs" "os" - "path" "path/filepath" "strings" "time" @@ -30,6 +30,7 @@ import ( types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" 
"github.com/pkg/errors" @@ -47,13 +48,13 @@ import ( // ListRecycle returns the list of available recycle items // ref -> the space (= resourceid), key -> deleted node id, relativePath = relative to key func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - - items := make([]*provider.RecycleItem, 0) if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" { - return items, errtypes.BadRequest("spaceid required") + return nil, errtypes.BadRequest("spaceid required") } + spaceID := ref.ResourceId.OpaqueId + + sublog := appctx.GetLogger(ctx).With().Str("space", spaceID).Str("key", key).Str("relative_path", relativePath).Logger() // check permissions trashnode, err := fs.lu.NodeFromSpaceID(ctx, ref.ResourceId) @@ -70,92 +71,94 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference return nil, errtypes.PermissionDenied(key) } - spaceID := ref.ResourceId.OpaqueId if key == "" && relativePath == "/" { return fs.listTrashRoot(ctx, spaceID) } - trashRoot := fs.getRecycleRoot(ctx, spaceID) - f, err := os.Open(filepath.Join(trashRoot, key, relativePath)) + // build a list of trash items relative to the given trash root and path + items := make([]*provider.RecycleItem, 0) + + trashRootPath := filepath.Join(fs.getRecycleRoot(ctx, spaceID), lookup.Pathify(key, 4, 2)) + _, timeSuffix, err := readTrashLink(trashRootPath) if err != nil { - if os.IsNotExist(err) { - return items, nil + sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link") + return nil, err + } + + origin := "" + // lookup origin path in extended attributes + if attrBytes, err := xattr.Get(trashRootPath, xattrs.TrashOriginAttr); err == nil { + origin = string(attrBytes) + } else { + sublog.Error().Err(err).Str("space", spaceID).Msg("could not read origin path, skipping") + return nil, err + } + + // all deleted items have the same deletion time + var deletionTime *types.Timestamp + if parsed, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil { + deletionTime = &types.Timestamp{ + Seconds: uint64(parsed.Unix()), + // TODO nanos } - return nil, errors.Wrapf(err, "tree: error listing %s", trashRoot) + } else { + sublog.Error().Err(err).Msg("could not parse time format, ignoring") } - defer f.Close() - parentNode, err := os.Readlink(filepath.Join(trashRoot, key)) + trashItemPath := filepath.Join(trashRootPath, relativePath) + + f, err := os.Open(trashItemPath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Msg("error reading trash link, skipping") - return nil, err + if os.IsNotExist(err) { + return items, nil + } + return nil, errors.Wrapf(err, "recycle: error opening trashItemPath %s", trashItemPath) } + defer f.Close() if md, err := f.Stat(); err != nil { return nil, err } else if !md.IsDir() { // this is the case when we want to directly list a file in the trashbin - item, err := fs.createTrashItem(ctx, parentNode, filepath.Dir(relativePath), filepath.Join(trashRoot, key, relativePath)) + item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath), deletionTime) if err != nil { return items, err } + item.Ref = &provider.Reference{ + Path: filepath.Join(origin, relativePath), + } items = append(items, item) return items, err } + // we have to read the names and stat the path to follow the symlinks names, err := f.Readdirnames(0) if err != nil { return nil, err } - for i := range names { - if item, err 
:= fs.createTrashItem(ctx, parentNode, relativePath, filepath.Join(trashRoot, key, relativePath, names[i])); err == nil { + for _, name := range names { + md, err := os.Stat(filepath.Join(trashItemPath, name)) + if err != nil { + sublog.Error().Err(err).Str("name", name).Msg("could not stat, skipping") + continue + } + if item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath, name), deletionTime); err == nil { + item.Ref = &provider.Reference{ + Path: filepath.Join(origin, relativePath, name), + } items = append(items, item) } } return items, nil } -func (fs *Decomposedfs) createTrashItem(ctx context.Context, parentNode, intermediatePath, itemPath string) (*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - trashnode, err := os.Readlink(itemPath) - if err != nil { - log.Error().Err(err).Msg("error reading trash link, skipping") - return nil, err - } - parts := strings.SplitN(filepath.Base(parentNode), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - log.Error().Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") - return nil, errors.New("malformed trash link") - } - - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) - md, err := os.Stat(nodePath) - if err != nil { - log.Error().Err(err).Str("trashnode", trashnode).Msg("could not stat trash item, skipping") - return nil, err - } +func (fs *Decomposedfs) createTrashItem(ctx context.Context, md fs.FileInfo, key string, deletionTime *types.Timestamp) (*provider.RecycleItem, error) { item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: path.Join(parts[0], intermediatePath, filepath.Base(itemPath)), - } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { - item.DeletionTime = &types.Timestamp{ - Seconds: uint64(deletionTime.Unix()), - // TODO nanos - } - } else { - log.Error().Err(err).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") - } - - // lookup origin path in extended attributes - parentPath := fs.lu.InternalPath(filepath.Base(parentNode)) - if attrBytes, err := xattr.Get(parentPath, xattrs.TrashOriginAttr); err == nil { - item.Ref = &provider.Reference{Path: filepath.Join(string(attrBytes), intermediatePath, filepath.Base(itemPath))} - } else { - log.Error().Err(err).Str("link", trashnode).Msg("could not read origin path, skipping") - return nil, err + Type: getResourceType(md.IsDir()), + Size: uint64(md.Size()), + Key: key, + DeletionTime: deletionTime, } // TODO filter results by permission ... on the original parent? or the trashed node? 
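The readTrashLink helper added in the next hunk recovers the node id and the deletion timestamp from a trash symlink target by fixed string offsets, and the trash root itself now lives inside the space (spaces/xx/yyyy…/trash, with the deleted key sharded via Pathify as shown above). The following self-contained sketch reproduces that offset-based parsing on the example target from the patch comment; it skips the os.Readlink step so it can run standalone, and ".T." stands in for node.TrashIDDelimiter.

    package main

    import (
        "fmt"
        "strings"
    )

    // parseTrashLinkTarget mimics the offset logic of readTrashLink on an
    // already-resolved symlink target (illustrative helper, not in the patch).
    func parseTrashLinkTarget(link string) (nodeID, timestamp string, err error) {
        // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z
        link = strings.ReplaceAll(link, "/", "")
        // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z
        if len(link) < 55 || link[0:15] != "..........nodes" || link[51:54] != ".T." {
            return "", "", fmt.Errorf("malformed trash link")
        }
        // [15:51] holds the 36-character node id, [54:] the RFC3339Nano deletion time
        return link[15:51], link[54:], nil
    }

    func main() {
        id, ts, err := parseTrashLinkTarget("../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z")
        fmt.Println(id, ts, err)
        // e56c75a8-d235-4cbb-8b4e-48b6fd0f2094 2022-02-16T14:38:11.769917408Z <nil>
    }

Because the five "../" segments collapse to ten dots and the pathified node id collapses back to a plain 36-character UUID, the fixed offsets 15, 51 and 54 are stable for this layout; the same idea is used again for the spacetypes links later in the patch.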
@@ -166,56 +169,58 @@ func (fs *Decomposedfs) createTrashItem(ctx context.Context, parentNode, interme return item, nil } +// readTrashLink returns nodeID and timestamp +func readTrashLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err + } + // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + if link[0:15] != "..........nodes" || link[51:54] != node.TrashIDDelimiter { + return "", "", errtypes.InternalError("malformed trash link") + } + return link[15:51], link[54:], nil +} + func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*provider.RecycleItem, error) { log := appctx.GetLogger(ctx) items := make([]*provider.RecycleItem, 0) trashRoot := fs.getRecycleRoot(ctx, spaceID) - f, err := os.Open(trashRoot) - if err != nil { - if os.IsNotExist(err) { - return items, nil - } - return nil, errors.Wrap(err, "tree: error listing "+trashRoot) - } - defer f.Close() - - names, err := f.Readdirnames(0) + matches, err := filepath.Glob(trashRoot + "/*/*/*/*/*") if err != nil { return nil, err } - for i := range names { - trashnode, err := os.Readlink(filepath.Join(trashRoot, names[i])) + for _, itemPath := range matches { + nodeID, timeSuffix, err := readTrashLink(itemPath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Msg("error reading trash link, skipping") - continue - } - parts := strings.SplitN(filepath.Base(trashnode), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Msg("error reading trash link, skipping") continue } - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) + nodePath := fs.lu.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix md, err := os.Stat(nodePath) if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode). 
/*.Interface("parts", parts)*/ Msg("could not stat trash item, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not stat trash item, skipping") continue } item := &provider.RecycleItem{ Type: getResourceType(md.IsDir()), Size: uint64(md.Size()), - Key: parts[0], + Key: nodeID, } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { + if deletionTime, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil { item.DeletionTime = &types.Timestamp{ Seconds: uint64(deletionTime.Unix()), // TODO nanos } } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not parse time format, ignoring") } // lookup origin path in extended attributes @@ -223,7 +228,7 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p if attrBytes, err = xattr.Get(nodePath, xattrs.TrashOriginAttr); err == nil { item.Ref = &provider.Reference{Path: string(attrBytes)} } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read origin path, skipping") + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path, skipping") continue } // TODO filter results by permission ... on the original parent? or the trashed node? @@ -326,5 +331,5 @@ func getResourceType(isDir bool) provider.ResourceType { } func (fs *Decomposedfs) getRecycleRoot(ctx context.Context, spaceID string) string { - return filepath.Join(fs.o.Root, "trash", spaceID) + return filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2), "trash") } diff --git a/pkg/storage/utils/decomposedfs/recycle_test.go b/pkg/storage/utils/decomposedfs/recycle_test.go index eb71af14a2..e341ee89ee 100644 --- a/pkg/storage/utils/decomposedfs/recycle_test.go +++ b/pkg/storage/utils/decomposedfs/recycle_test.go @@ -47,8 +47,8 @@ var _ = Describe("Recycle", func() { When("a user deletes files from the same space", func() { BeforeEach(func() { - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has this permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -132,8 +132,8 @@ var _ = Describe("Recycle", func() { Username: "anotherusername", }) - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has this permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -240,12 +240,13 @@ var _ = Describe("Recycle", func() { When("a user deletes files from different spaces", func() { BeforeEach(func() { var err error + env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(1) // Permissions required for setup below (AddGrant) projectID, err = env.CreateTestStorageSpace("project", &provider.Quota{QuotaMaxBytes: 2000}) Expect(err).ToNot(HaveOccurred()) 
Expect(projectID).ToNot(BeNil()) - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has this permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ InitiateFileUpload: true, Delete: true, ListRecycle: true, @@ -289,7 +290,7 @@ var _ = Describe("Recycle", func() { Expect(len(items)).To(Equal(1)) // use up 2000 byte quota - _, err = env.CreateTestFile("largefile", "largefile-blobid", 2000, projectID.OpaqueId) + _, err = env.CreateTestFile("largefile", "largefile-blobid", projectID.OpaqueId, projectID.StorageId, 2000) Expect(err).ToNot(HaveOccurred()) err = env.Fs.RestoreRecycleItem(env.Ctx, &provider.Reference{ResourceId: projectID}, items[0].Key, "/", nil) @@ -316,8 +317,8 @@ var _ = Describe("Recycle", func() { Username: "readusername", }) - // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + // in this scenario user "u-s-e-r-id" has this permissions: + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ Delete: true, ListRecycle: true, PurgeRecycle: true, @@ -395,14 +396,14 @@ var _ = Describe("Recycle", func() { ctx = ctxpkg.ContextSetUser(context.Background(), &userpb.User{ Id: &userpb.UserId{ Idp: "maliciousidp", - OpaqueId: "hacker", + OpaqueId: "h-a-c-k-er", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "mrhacker", }) // in this scenario user "userid" has this permissions: - registerPermissions(env.Permissions, "userid", &provider.ResourcePermissions{ + registerPermissions(env.Permissions, "u-s-e-r-id", &provider.ResourcePermissions{ Delete: true, ListRecycle: true, PurgeRecycle: true, @@ -410,7 +411,7 @@ var _ = Describe("Recycle", func() { }) // and user "hacker" has no permissions: - registerPermissions(env.Permissions, "hacker", &provider.ResourcePermissions{}) + registerPermissions(env.Permissions, "h-a-c-k-er", &provider.ResourcePermissions{}) }) It("cannot delete, list, purge or restore", func() { diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go index 8e50b8fcc2..912122566d 100644 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -64,12 +64,17 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen revisions = []*provider.FileVersion{} np := n.InternalPath() - if items, err := filepath.Glob(np + ".REV.*"); err == nil { + if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil { for i := range items { if fi, err := os.Stat(items[i]); err == nil { + parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) + if len(parts) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("invalid revision name, skipping") + continue + } mtime := fi.ModTime() rev := &provider.FileVersion{ - Key: filepath.Base(items[i]), + Key: n.ID + node.RevisionIDDelimiter + parts[1], Mtime: uint64(mtime.Unix()), } blobSize, err := node.ReadBlobSizeAttr(items[i]) @@ -94,15 +99,16 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe log := appctx.GetLogger(ctx) // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) if len(kp) != 2 { log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") return nil, 
errtypes.NotFound(revisionKey) } log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + spaceID := ref.ResourceId.OpaqueId // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) + n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0]) if err != nil { return nil, err } @@ -122,7 +128,7 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) } - contentPath := fs.lu.InternalPath(revisionKey) + contentPath := fs.lu.InternalPath(spaceID, revisionKey) r, err := os.Open(contentPath) if err != nil { @@ -139,14 +145,15 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer log := appctx.GetLogger(ctx) // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) + kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2) if len(kp) != 2 { log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") return errtypes.NotFound(revisionKey) } + spaceID := ref.ResourceId.StorageId // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) + n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0]) if err != nil { return err } @@ -171,11 +178,11 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer } // move current version to new revision - nodePath := fs.lu.InternalPath(kp[0]) + nodePath := fs.lu.InternalPath(spaceID, kp[0]) var fi os.FileInfo if fi, err = os.Stat(nodePath); err == nil { // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := fs.lu.InternalPath(kp[0] + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) + versionsPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) err = os.Rename(nodePath, versionsPath) if err != nil { @@ -184,7 +191,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer // copy old revision to current location - revisionPath := fs.lu.InternalPath(revisionKey) + revisionPath := fs.lu.InternalPath(spaceID, revisionKey) if err = os.Rename(revisionPath, nodePath); err != nil { return diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 4fa9aab127..9fb6b9f028 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -30,7 +30,7 @@ import ( "time" userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - permissionsv1beta1 "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" @@ -39,29 +39,25 @@ import ( ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/rgrpc/status" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" "github.com/cs3org/reva/pkg/utils/resourceid" "github.com/google/uuid" + "github.com/pkg/errors" ) const ( spaceTypePersonal = "personal" - spaceTypeProject = 
"project" - spaceTypeShare = "share" - spaceTypeAny = "*" - spaceIDAny = "*" + // spaceTypeProject = "project" + spaceTypeShare = "share" + spaceTypeAny = "*" + spaceIDAny = "*" ) // CreateStorageSpace creates a storage space func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - // spaces will be located by default in the root of the storage. - r, err := fs.lu.RootNode(ctx) - if err != nil { - return nil, err - } // "everything is a resource" this is the unique ID for the Space resource. spaceID := uuid.New().String() @@ -81,48 +77,41 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr // TODO enforce a uuid? // TODO clarify if we want to enforce a single personal storage space or if we want to allow sending the spaceid if req.Type == spaceTypePersonal { - spaceID = req.Owner.Id.OpaqueId - } - - n, err := r.Child(ctx, spaceID) - if err != nil { - return nil, err + spaceID = req.GetOwner().GetId().GetOpaqueId() } - if n.Exists { + root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) + if err == nil && root.Exists { return nil, errtypes.AlreadyExists("decomposedfs: spaces: space already exists") } - // spaceid and nodeid must be the same - // TODO enforce a uuid? - n.ID = spaceID - - if err := fs.tp.CreateDir(ctx, n); err != nil { - return nil, err + if !fs.canCreateSpace(ctx, spaceID) { + return nil, errtypes.PermissionDenied(spaceID) } - // always enable propagation on the storage space root - // mark the space root node as the end of propagation - if err = n.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") - return nil, err + // create a directory node + rootPath := root.InternalPath() + if err = os.MkdirAll(rootPath, 0700); err != nil { + return nil, errors.Wrap(err, "decomposedfs: error creating node") } - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, fmt.Errorf("decomposedfs: spaces: contextual user not found") + if err := root.WriteAllNodeMetadata(); err != nil { + return nil, err } - - ownerID := u.Id - if req.Type == spaceTypeProject { - ownerID = &userv1beta1.UserId{} + if req.GetOwner() != nil && req.GetOwner().GetId() != nil { + if err := root.WriteOwner(req.GetOwner().GetId()); err != nil { + return nil, err + } } - if err := n.ChangeOwner(ownerID); err != nil { + // always enable propagation on the storage space root + // mark the space root node as the end of propagation + if err = root.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", root).Msg("could not mark space root node to propagate") return nil, err } - err = fs.createStorageSpace(ctx, req.Type, n.ID) + err = fs.linkStorageSpaceType(ctx, req.Type, root.ID) if err != nil { return nil, err } @@ -137,30 +126,33 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr if description != "" { metadata[xattrs.SpaceDescriptionAttr] = description } - if err := xattrs.SetMultiple(n.InternalPath(), metadata); err != nil { + if err := xattrs.SetMultiple(root.InternalPath(), metadata); err != nil { return nil, err } ctx = context.WithValue(ctx, utils.SpaceGrant, struct{}{}) - if err := fs.AddGrant(ctx, &provider.Reference{ - ResourceId: &provider.ResourceId{ - StorageId: spaceID, - OpaqueId: spaceID, - }, - }, &provider.Grant{ - Grantee: &provider.Grantee{ - Type: 
provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: u.Id, + if req.Type != spaceTypePersonal { + u := ctxpkg.ContextMustGetUser(ctx) + if err := fs.AddGrant(ctx, &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: spaceID, + OpaqueId: spaceID, }, - }, - Permissions: ocsconv.NewManagerRole().CS3ResourcePermissions(), - }); err != nil { - return nil, err + }, &provider.Grant{ + Grantee: &provider.Grantee{ + Type: provider.GranteeType_GRANTEE_TYPE_USER, + Id: &provider.Grantee_UserId{ + UserId: u.Id, + }, + }, + Permissions: ocsconv.NewManagerRole().CS3ResourcePermissions(), + }); err != nil { + return nil, err + } } - space, err := fs.storageSpaceFromNode(ctx, n, "*", n.InternalPath(), false) + space, err := fs.storageSpaceFromNode(ctx, root, req.Type, root.InternalPath(), false) if err != nil { return nil, err } @@ -174,6 +166,62 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr return resp, nil } +func (fs *Decomposedfs) canListAllSpaces(ctx context.Context) bool { + user := ctxpkg.ContextMustGetUser(ctx) + checkRes, err := fs.permissionsClient.CheckPermission(ctx, &cs3permissions.CheckPermissionRequest{ + Permission: "list-all-spaces", + SubjectRef: &cs3permissions.SubjectReference{ + Spec: &cs3permissions.SubjectReference_UserId{ + UserId: user.Id, + }, + }, + }) + if err != nil { + return false + } + + return checkRes.Status.Code == v1beta11.Code_CODE_OK +} + +// returns true when the user in the context can create a space / resource with storageID and nodeID set to his user opaqueID +func (fs *Decomposedfs) canCreateSpace(ctx context.Context, spaceID string) bool { + user := ctxpkg.ContextMustGetUser(ctx) + checkRes, err := fs.permissionsClient.CheckPermission(ctx, &cs3permissions.CheckPermissionRequest{ + Permission: "create-space", + SubjectRef: &cs3permissions.SubjectReference{ + Spec: &cs3permissions.SubjectReference_UserId{ + UserId: user.Id, + }, + }, + Ref: &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: spaceID, + // OpaqueId is the same, no need to transfer it + }, + }, + }) + if err != nil { + return false + } + + return checkRes.Status.Code == v1beta11.Code_CODE_OK +} + +func readSpaceAndNodeFromSpaceTypeLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err + } + // ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51 + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ....spaces4c510ada-c86b-4815-8820-42cdf82c3d51nodes4c510ada-c86b-4815-8820-42cdf82c3d51 + if link[0:10] != "....spaces" || link[46:51] != "nodes" { + return "", "", errtypes.InternalError("malformed link") + } + return link[10:46], link[51:], nil +} + // ListStorageSpaces returns a list of StorageSpaces. // The list can be filtered by space type or space id. // Spaces are persisted with symlinks in /spaces// pointing to ../../nodes/, the root node of the space @@ -218,15 +266,42 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide spaceTypes = []string{"*"} } + canListAllSpaces := fs.canListAllSpaces(ctx) + spaces := []*provider.StorageSpace{} // build the glob path, eg. 
// /path/to/root/spaces/{spaceType}/{spaceId} // /path/to/root/spaces/personal/nodeid // /path/to/root/spaces/shared/nodeid + if spaceID != spaceIDAny && nodeID != spaceIDAny { + // try directly reading the node + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node") + return nil, err + } + if !n.Exists { + // return empty list + return spaces, nil + } + space, err := fs.storageSpaceFromNode(ctx, n, spaceTypeAny, n.InternalPath(), canListAllSpaces) + if err != nil { + return nil, err + } + // filter space types + for _, spaceType := range spaceTypes { + if spaceType == "*" || spaceType == space.SpaceType { + spaces = append(spaces, space) + } + } + + return spaces, nil + } + matches := []string{} for _, spaceType := range spaceTypes { - path := filepath.Join(fs.o.Root, "spaces", spaceType, nodeID) + path := filepath.Join(fs.o.Root, "spacetypes", spaceType, nodeID) m, err := filepath.Glob(path) if err != nil { return nil, err @@ -246,41 +321,19 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // the personal spaces must also use the nodeid and not the name numShares := 0 - client, err := pool.GetGatewayServiceClient(fs.o.GatewayAddr) - if err != nil { - return nil, err - } - - user := ctxpkg.ContextMustGetUser(ctx) - checkRes, err := client.CheckPermission(ctx, &permissionsv1beta1.CheckPermissionRequest{ - Permission: "list-all-spaces", - SubjectRef: &permissionsv1beta1.SubjectReference{ - Spec: &permissionsv1beta1.SubjectReference_UserId{ - UserId: user.Id, - }, - }, - }) - if err != nil { - return nil, err - } - - canListAllSpaces := false - if checkRes.Status.Code == v1beta11.Code_CODE_OK { - canListAllSpaces = true - } for i := range matches { - var target string var err error // always read link in case storage space id != node id - if target, err = os.Readlink(matches[i]); err != nil { + spaceID, nodeID, err = readSpaceAndNodeFromSpaceTypeLink(matches[i]) + if err != nil { appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[i]).Msg("could not read link, skipping") continue } - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("id", filepath.Base(target)).Msg("could not read node, skipping") + appctx.GetLogger(ctx).Error().Err(err).Str("id", nodeID).Msg("could not read node, skipping") continue } @@ -307,8 +360,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // if there are no matches (or they happened to be spaces for the owner) and the node is a child return a space if len(matches) <= numShares && nodeID != spaceID { // try node id - target := filepath.Join(fs.o.Root, "nodes", nodeID) - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) if err != nil { return nil, err } @@ -333,10 +385,10 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } space := req.StorageSpace - _, spaceID, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) + spaceID, nodeID, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) if restore { - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) + matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, nodeID)) if err != nil { return nil, err } @@ -355,8 +407,9 @@ func (fs *Decomposedfs) 
UpdateStorageSpace(ctx context.Context, req *provider.Up if err != nil { appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") } - - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + nodeID := strings.TrimLeft(target, "/.") + nodeID = strings.ReplaceAll(nodeID, "/", "") + n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) if err != nil { return nil, err } @@ -371,26 +424,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } } - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_NOT_FOUND, - Message: fmt.Sprintf("update space failed: found %d matching spaces", len(matches)), - }, - }, nil - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - node, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + node, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) if err != nil { return nil, err } @@ -482,21 +516,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De spaceID := req.Id.OpaqueId - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) - if err != nil { - return err - } - - if len(matches) != 1 { - return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) if err != nil { return err } @@ -505,7 +525,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De if !strings.Contains(n.Name, node.TrashIDDelimiter) { return errtypes.NewErrtypeFromStatus(status.NewInvalidArg(ctx, "can't purge enabled space")) } - ip := fs.lu.InternalPath(req.Id.OpaqueId) + ip := n.InternalPath() matches, err := filepath.Glob(ip) if err != nil { return err @@ -516,7 +536,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return err } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, req.Id.OpaqueId)) + matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, req.Id.OpaqueId)) if err != nil { return err } @@ -528,7 +548,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return err } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "nodes", node.RootID, req.Id.OpaqueId+node.TrashIDDelimiter+"*")) + matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceID, "nodes", node.RootID, req.Id.OpaqueId+node.TrashIDDelimiter+"*")) if err != nil { return err } @@ -536,9 +556,9 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De if len(matches) != 1 { return fmt.Errorf("delete root node failed: found %d matching root nodes", len(matches)) } - return os.RemoveAll(matches[0]) } + // don't delete - just rename dn := *n deletionTime := time.Now().UTC().Format(time.RFC3339Nano) @@ -548,6 +568,14 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De if err != nil { return err } + matches, err := 
filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, spaceID)) + if err != nil { + return err + } + + if len(matches) != 1 { + return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) + } err = os.RemoveAll(matches[0]) if err != nil { @@ -559,14 +587,14 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return os.Symlink(trashPath, np) } -func (fs *Decomposedfs) createStorageSpace(ctx context.Context, spaceType, spaceID string) error { +func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType string, spaceID string) error { // create space type dir - if err := os.MkdirAll(filepath.Join(fs.o.Root, "spaces", spaceType), 0700); err != nil { + if err := os.MkdirAll(filepath.Join(fs.o.Root, "spacetypes", spaceType), 0700); err != nil { return err } - // we can reuse the node id as the space id - err := os.Symlink("../../nodes/"+spaceID, filepath.Join(fs.o.Root, "spaces", spaceType, spaceID)) + // link space in spacetypes + err := os.Symlink("../../spaces/"+lookup.Pathify(spaceID, 1, 2)+"/nodes/"+lookup.Pathify(spaceID, 4, 2), filepath.Join(fs.o.Root, "spacetypes", spaceType, spaceID)) if err != nil { if isAlreadyExists(err) { appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("symlink already exists") @@ -602,11 +630,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } } - owner, err := n.Owner() - if err != nil { - return nil, err - } - + var err error // TODO apply more filters var sname string if sname, err = n.GetMetadata(xattrs.SpaceNameAttr); err != nil { @@ -618,7 +642,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, err } - glob := filepath.Join(fs.o.Root, "spaces", spaceType, n.SpaceRoot.ID) + glob := filepath.Join(fs.o.Root, "spacetypes", spaceType, n.SpaceRoot.ID) matches, err := filepath.Glob(glob) if err != nil { return nil, err @@ -680,9 +704,9 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } } - if spaceType != spaceTypeProject && owner.OpaqueId != "" { + if n.Owner() != nil && n.Owner().OpaqueId != "" { space.Owner = &userv1beta1.User{ // FIXME only return a UserID, not a full blown user object - Id: owner, + Id: n.Owner(), } } diff --git a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go index 102647811a..25ce44889c 100644 --- a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go +++ b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go @@ -23,12 +23,15 @@ import ( "os" "path/filepath" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/google/uuid" "github.com/pkg/xattr" "github.com/stretchr/testify/mock" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ruser "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/storage" @@ -43,15 +46,16 @@ import ( // TestEnv represents a test environment for unit tests type TestEnv struct { - Root string - Fs storage.FS - Tree *tree.Tree - Permissions *mocks.PermissionsChecker - Blobstore *treemocks.Blobstore - Owner *userpb.User - Lookup *decomposedfs.Lookup - Ctx context.Context - SpaceRootRes *providerv1beta1.ResourceId 
+ Root string + Fs storage.FS + Tree *tree.Tree + Permissions *mocks.PermissionsChecker + Blobstore *treemocks.Blobstore + Owner *userpb.User + Lookup *lookup.Lookup + Ctx context.Context + SpaceRootRes *providerv1beta1.ResourceId + PermissionsClient *mocks.CS3PermissionsClient } // NewTestEnv prepares a test environment on disk @@ -81,30 +85,32 @@ func NewTestEnv() (*TestEnv, error) { owner := &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "userid", + OpaqueId: "u-s-e-r-id", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "username", } - lookup := &decomposedfs.Lookup{Options: o} + lookup := &lookup.Lookup{Options: o} permissions := &mocks.PermissionsChecker{} + cs3permissionsclient := &mocks.CS3PermissionsClient{} bs := &treemocks.Blobstore{} tree := tree.New(o.Root, true, true, lookup, bs) - fs, err := decomposedfs.New(o, lookup, permissions, tree) + fs, err := decomposedfs.New(o, lookup, permissions, tree, cs3permissionsclient) if err != nil { return nil, err } ctx := ruser.ContextSetUser(context.Background(), owner) env := &TestEnv{ - Root: tmpRoot, - Fs: fs, - Tree: tree, - Lookup: lookup, - Permissions: permissions, - Blobstore: bs, - Owner: owner, - Ctx: ctx, + Root: tmpRoot, + Fs: fs, + Tree: tree, + Lookup: lookup, + Permissions: permissions, + Blobstore: bs, + Owner: owner, + Ctx: ctx, + PermissionsClient: cs3permissionsclient, } env.SpaceRootRes, err = env.CreateTestStorageSpace("personal", nil) @@ -136,9 +142,10 @@ func (t *TestEnv) CreateTestDir(name string, parentRef *providerv1beta1.Referenc } // CreateTestFile creates a new file and its metadata and returns a corresponding Node -func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID string) (*node.Node, error) { - // Create file in dir1 - file := node.New( +func (t *TestEnv) CreateTestFile(name, blobID, parentID, spaceID string, blobSize int64) (*node.Node, error) { + // Create n in dir1 + n := node.New( + spaceID, uuid.New().String(), parentID, name, @@ -147,22 +154,26 @@ func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID s nil, t.Lookup, ) - _, err := os.OpenFile(file.InternalPath(), os.O_CREATE, 0700) + nodePath := n.InternalPath() + if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { + return nil, err + } + _, err := os.OpenFile(nodePath, os.O_CREATE, 0700) if err != nil { return nil, err } - err = file.WriteAllNodeMetadata(t.Owner.Id) + err = n.WriteAllNodeMetadata() if err != nil { return nil, err } // Link in parent - childNameLink := filepath.Join(t.Lookup.InternalPath(file.ParentID), file.Name) - err = os.Symlink("../"+file.ID, childNameLink) + childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) + err = os.Symlink("../../../../../"+lookup.Pathify(n.ID, 4, 2), childNameLink) if err != nil { return nil, err } - return file, err + return n, n.FindStorageSpaceRoot() } // CreateTestStorageSpace will create a storage space with some directories and files @@ -172,7 +183,9 @@ func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID s // /dir1/file1 // /dir1/subdir1 func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quota) (*providerv1beta1.ResourceId, error) { - t.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(1) // Permissions required for setup below + t.PermissionsClient.On("CheckPermission", mock.Anything, mock.Anything, mock.Anything).Times(1).Return(&cs3permissions.CheckPermissionResponse{ + Status: &v1beta11.Status{Code: 
v1beta11.Code_CODE_OK}, + }, nil) space, err := t.Fs.CreateStorageSpace(t.Ctx, &providerv1beta1.CreateStorageSpaceRequest{ Owner: t.Owner, Type: typ, @@ -185,7 +198,7 @@ func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quot ref := buildRef(space.StorageSpace.Id.OpaqueId, "") // the space name attribute is the stop condition in the lookup - h, err := node.ReadNode(t.Ctx, t.Lookup, space.StorageSpace.Id.OpaqueId) + h, err := node.ReadNode(t.Ctx, t.Lookup, space.StorageSpace.Id.OpaqueId, space.StorageSpace.Id.OpaqueId) if err != nil { return nil, err } @@ -201,7 +214,7 @@ func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quot } // Create file1 in dir1 - _, err = t.CreateTestFile("file1", "file1-blobid", 1234, dir1.ID) + _, err = t.CreateTestFile("file1", "file1-blobid", dir1.ID, dir1.SpaceID, 1234) if err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 3a65789614..10e6e05725 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -28,27 +28,21 @@ import ( "strings" "time" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/pkg/xattr" "github.com/rs/zerolog/log" ) // go:generate mockery -name Blobstore -const ( - spaceTypePersonal = "personal" - spaceTypeShare = "share" -) - // Blobstore defines an interface for storing blobs in a blobstore type Blobstore interface { Upload(key string, reader io.Reader) error @@ -60,10 +54,9 @@ type Blobstore interface { type PathLookup interface { NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) - RootNode(ctx context.Context) (node *node.Node, err error) InternalRoot() string - InternalPath(ID string) string + InternalPath(spaceID, nodeID string) string Path(ctx context.Context, n *node.Node) (path string, err error) ShareFolder() string } @@ -93,14 +86,13 @@ func New(root string, tta bool, tsa bool, lu PathLookup, bs Blobstore) *Tree { } // Setup prepares the tree structure -func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { +func (t *Tree) Setup() error { // create data paths for internal layout dataPaths := []string{ - filepath.Join(t.root, "nodes"), + filepath.Join(t.root, "spaces"), // notes contain symlinks from nodes//uploads/ to ../../uploads/ // better to keep uploads on a fast / volatile storage before a workflow finally moves them to the nodes dir filepath.Join(t.root, "uploads"), - filepath.Join(t.root, "trash"), } for _, v := range dataPaths { err := os.MkdirAll(v, 0700) @@ -109,37 +101,12 @@ func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { } } - // the root node has an empty name - // the root node has no parent - n := node.New(node.RootID, "", "", 0, "", nil, t.lookup) - err := t.createNode(n, owner) - if err != nil { - return err - } - - // set propagation flag - v := "0" - if propagateToRoot { - v = "1" - } - if err = 
n.SetMetadata(xattrs.PropagationAttr, v); err != nil { - return err - } - // create spaces folder and iterate over existing nodes to populate it - spacesPath := filepath.Join(t.root, "spaces") - fi, err := os.Stat(spacesPath) - if os.IsNotExist(err) { - // create personal spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypePersonal), 0700); err != nil { - return err - } - // create share spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypeShare), 0700); err != nil { - return err - } + nodesPath := filepath.Join(t.root, "nodes") + fi, err := os.Stat(nodesPath) + if err == nil && fi.IsDir() { - f, err := os.Open(filepath.Join(t.root, "nodes")) + f, err := os.Open(nodesPath) if err != nil { return err } @@ -148,41 +115,66 @@ func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { return err } - for i := range nodes { - nodePath := filepath.Join(t.root, "nodes", nodes[i].Name()) + for _, node := range nodes { + nodePath := filepath.Join(nodesPath, node.Name()) - // is it a user root? -> create personal space if isRootNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypePersonal, nodes[i].Name(), nodes[i].Name()) + if err := t.moveNode(node.Name(), node.Name()); err != nil { + logger.New().Error().Err(err). + Str("space", node.Name()). + Msg("could not move space") + continue + } + t.linkSpace("personal", node.Name()) } + } + // TODO delete nodesPath if empty + + } - // is it a shared node? -> create share space - if isSharedNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypeShare, nodes[i].Name(), nodes[i].Name()) + return nil +} +func (t *Tree) moveNode(spaceID, nodeID string) error { + dirPath := filepath.Join(t.root, "nodes", nodeID) + f, err := os.Open(dirPath) + if err != nil { + return err + } + children, err := f.Readdir(0) + if err != nil { + return err + } + for _, child := range children { + old := filepath.Join(t.root, "nodes", child.Name()) + new := filepath.Join(t.root, "spaces", spaceID, "nodes", child.Name()) + if err := os.Rename(old, new); err != nil { + logger.New().Error().Err(err). + Str("space", spaceID). + Str("nodes", child.Name()). + Str("oldpath", old). + Str("newpath", new). + Msg("could not rename node") + } + if child.IsDir() { + if err := t.moveNode(spaceID, child.Name()); err != nil { + return err } } - } else if !fi.IsDir() { - // check if it is a directory - return fmt.Errorf("%s is not a directory", spacesPath) } - return nil } // linkSpace creates a new symbolic link for a space with the given type st, and node id -func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { - spacesPath := filepath.Join(t.root, "spaces", spaceType, spaceID) - expectedTarget := "../../nodes/" + nodeID - linkTarget, err := os.Readlink(spacesPath) +func (t *Tree) linkSpace(spaceType, spaceID string) { + spaceTypesPath := filepath.Join(t.root, "spacetypes", spaceType, spaceID) + expectedTarget := "../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2) + linkTarget, err := os.Readlink(spaceTypesPath) if errors.Is(err, os.ErrNotExist) { - err = os.Symlink(expectedTarget, spacesPath) + err = os.Symlink(expectedTarget, spaceTypesPath) if err != nil { logger.New().Error().Err(err). Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). Msg("could not create symlink") } } else { @@ -190,14 +182,12 @@ func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { logger.New().Error().Err(err). 
Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). Msg("could not read symlink") } if linkTarget != expectedTarget { logger.New().Warn(). Str("space_type", spaceType). Str("space", spaceID). - Str("node", nodeID). Str("expected", expectedTarget). Str("actual", linkTarget). Msg("expected a different link target") @@ -209,6 +199,8 @@ func isRootNode(nodePath string) bool { attr, err := xattrs.Get(nodePath, xattrs.ParentidAttr) return err == nil && attr == node.RootID } + +/* func isSharedNode(nodePath string) bool { if attrs, err := xattr.List(nodePath); err == nil { for i := range attrs { @@ -219,6 +211,7 @@ func isSharedNode(nodePath string) bool { } return false } +*/ // GetMD returns the metadata of a node in the tree func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) { @@ -245,25 +238,14 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) { n.ID = uuid.New().String() } - // who will become the owner? the owner of the parent node, not the current user - var p *node.Node - p, err = n.Parent() - if err != nil { - return - } - var owner *userpb.UserId - owner, err = p.Owner() - if err != nil { - return - } - - err = t.createNode(n, owner) + err = t.createNode(n) if err != nil { return } // make child appear in listings - err = os.Symlink("../"+n.ID, filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name)) + relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) + err = os.Symlink(relativeNodePath, filepath.Join(n.ParentInternalPath(), n.Name)) if err != nil { // no better way to check unfortunately if !strings.Contains(err.Error(), "file exists") { @@ -298,7 +280,8 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) // are we just renaming (parent stays the same)? 
if oldNode.ParentID == newNode.ParentID { - parentPath := t.lookup.InternalPath(oldNode.ParentID) + // parentPath := t.lookup.InternalPath(oldNode.SpaceID, oldNode.ParentID) + parentPath := oldNode.ParentInternalPath() // rename child err = os.Rename( @@ -322,8 +305,8 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) // rename child err = os.Rename( - filepath.Join(t.lookup.InternalPath(oldNode.ParentID), oldNode.Name), - filepath.Join(t.lookup.InternalPath(newNode.ParentID), newNode.Name), + filepath.Join(oldNode.ParentInternalPath(), oldNode.Name), + filepath.Join(newNode.ParentInternalPath(), newNode.Name), ) if err != nil { return errors.Wrap(err, "Decomposedfs: could not move child") @@ -352,6 +335,16 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) return nil } +func readChildNodeFromLink(path string) (string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", err + } + nodeID := strings.TrimLeft(link, "/.") + nodeID = strings.ReplaceAll(nodeID, "/", "") + return nodeID, nil +} + // ListFolder lists the content of a folder node func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) { dir := n.InternalPath() @@ -370,13 +363,13 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro } nodes := []*node.Node{} for i := range names { - link, err := os.Readlink(filepath.Join(dir, names[i])) + nodeID, err := readChildNodeFromLink(filepath.Join(dir, names[i])) if err != nil { // TODO log continue } - child, err := node.ReadNode(ctx, t.lookup, filepath.Base(link)) + child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID) if err != nil { // TODO log continue @@ -394,14 +387,9 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { deletingSharedResource := ctx.Value(appctx.DeletingSharedResource) if deletingSharedResource != nil && deletingSharedResource.(bool) { - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) + src := filepath.Join(n.ParentInternalPath(), n.Name) return os.Remove(src) } - // Prepare the trash - err = os.MkdirAll(filepath.Join(t.root, "trash", n.SpaceRoot.ID), 0700) - if err != nil { - return - } // get the original path origin, err := t.lookup.Path(ctx, n) @@ -417,13 +405,24 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { deletionTime := time.Now().UTC().Format(time.RFC3339Nano) + // Prepare the trash + trashLink := filepath.Join(t.root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) + if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil { + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) + return err + } + + // FIXME can we just move the node into the trash dir? instead of adding another symlink and appending a trash timestamp? + // can we just use the mtime as the trash time? 
+ // TODO store a trashed by userid + // first make node appear in the space trash // parent id and name are stored as extended attributes in the node itself - trashLink := filepath.Join(t.root, "trash", n.SpaceRoot.ID, n.ID) - err = os.Symlink("../../nodes/"+n.ID+node.TrashIDDelimiter+deletionTime, trashLink) + err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink) if err != nil { - // To roll back changes - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -435,7 +434,8 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { if err != nil { // To roll back changes // TODO remove symlink - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -443,13 +443,14 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { _ = os.Remove(n.LockFilePath()) // finally remove the entry from the parent dir - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) + src := filepath.Join(n.ParentInternalPath(), n.Name) err = os.Remove(src) if err != nil { // To roll back changes // TODO revert the rename // TODO remove symlink - // TODO unset trashOriginAttr + // Roll back changes + _ = n.RemoveMetadata(xattrs.TrashOriginAttr) return } @@ -490,7 +491,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa } // add the entry for the parent dir - err = os.Symlink("../"+recycleNode.ID, filepath.Join(t.lookup.InternalPath(targetNode.ParentID), targetNode.Name)) + err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentInternalPath(), targetNode.Name)) if err != nil { return err } @@ -506,21 +507,6 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa } } - // the new node will inherit the permissions of its parent - p, err := targetNode.Parent() - if err != nil { - return err - } - - po, err := p.Owner() - if err != nil { - return err - } - - if err := recycleNode.ChangeOwner(po); err != nil { - return err - } - targetNode.Exists = true // update name attribute if err := recycleNode.SetMetadata(xattrs.NameAttr, targetNode.Name); err != nil { @@ -536,7 +522,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa // delete item link in trash if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trashitem") + log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") } return t.Propagate(ctx, targetNode) } @@ -551,6 +537,8 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa } fn := func() error { + // delete the actual node + // TODO recursively delete children if err := os.RemoveAll(deletedNodePath); err != nil { log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") return err @@ -586,15 +574,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { } // is propagation enabled for the parent node? 
- - var root *node.Node - if n.SpaceRoot == nil { - if root, err = t.lookup.RootNode(ctx); err != nil { - return - } - } else { - root = n.SpaceRoot - } + root := n.SpaceRoot // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly sTime := time.Now().UTC() @@ -772,56 +752,81 @@ func (t *Tree) DeleteBlob(key string) error { } // TODO check if node exists? -func (t *Tree) createNode(n *node.Node, owner *userpb.UserId) (err error) { +func (t *Tree) createNode(n *node.Node) (err error) { // create a directory node nodePath := n.InternalPath() if err = os.MkdirAll(nodePath, 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } - return n.WriteAllNodeMetadata(owner) + return n.WriteAllNodeMetadata() } -// TODO refactor the returned params into Node properties? would make all the path transformations go away... -func (t *Tree) readRecycleItem(ctx context.Context, spaceid, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) { - if key == "" { - return nil, "", "", "", errtypes.InternalError("key is empty") +// readTrashLink returns nodeID and timestamp +func readTrashLink(path string) (string, string, error) { + link, err := os.Readlink(path) + if err != nil { + return "", "", err } + // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z + if link[0:15] != "..........nodes" || link[51:54] != ".T." { + return "", "", errtypes.InternalError("malformed trash link") + } + return link[15:51], link[54:], nil +} - trashItem = filepath.Join(t.lookup.InternalRoot(), "trash", spaceid, key, path) - - var link string - link, err = os.Readlink(trashItem) +// readTrashChildLink returns nodeID +func readTrashChildLink(path string) (string, error) { + link, err := os.Readlink(path) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return + return "", err } + // ../../../../../e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094 + // TODO use filepath.Separator to support windows + link = strings.ReplaceAll(link, "/", "") + // ..........e56c75a8-d235-4cbb-8b4e-48b6fd0f2094 + if link[0:10] != ".........." { + return "", errtypes.InternalError("malformed trash child link") + } + return link[10:], nil +} - var attrStr string - trashNodeID := filepath.Base(link) - deletedNodePath = t.lookup.InternalPath(trashNodeID) - - owner := &userpb.UserId{} - // lookup ownerId in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerIDAttr); err == nil { - owner.OpaqueId = attrStr - } else { - return +// TODO refactor the returned params into Node properties? would make all the path transformations go away... 
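The two helpers above recover ids from trash symlinks purely by fixed string offsets after stripping the path separators. As a cross-check, the sketch below parses the same example link from the comment using the ".T." trash delimiter instead of fixed indices; it is only an illustrative equivalent, assuming node ids are 36-character UUIDs and the link has the shape shown above (same Windows-separator caveat as the TODO in readTrashLink).

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseTrashLinkSketch is a hypothetical, delimiter-based variant of
// readTrashLink: strip the separators and the leading "../" segments,
// drop the "nodes" prefix, then split node id and deletion time on ".T.".
func parseTrashLinkSketch(link string) (nodeID, deletionTime string, err error) {
	flat := strings.ReplaceAll(link, "/", "") // TODO use filepath.Separator to support windows, as above
	flat = strings.TrimLeft(flat, ".")        // removes the dots left over from "../../../../.."
	flat = strings.TrimPrefix(flat, "nodes")
	parts := strings.SplitN(flat, ".T.", 2)
	if len(parts) != 2 || len(parts[0]) != 36 {
		return "", "", errors.New("malformed trash link")
	}
	return parts[0], parts[1], nil
}

func main() {
	id, ts, err := parseTrashLinkSketch("../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z")
	// prints: e56c75a8-d235-4cbb-8b4e-48b6fd0f2094 2022-02-16T14:38:11.769917408Z <nil>
	fmt.Println(id, ts, err)
}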
+func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) { + if key == "" { + return nil, "", "", "", errtypes.InternalError("key is empty") } - // lookup ownerIdp in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerIDPAttr); err == nil { - owner.Idp = attrStr + + var nodeID, timeSuffix string + + trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2), path) + if path == "" || path == "/" { + nodeID, timeSuffix, err = readTrashLink(trashItem) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") + return + } + deletedNodePath = filepath.Join(t.lookup.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix) } else { - return + // children of a trashed node are in the nodes folder + nodeID, err = readTrashChildLink(trashItem) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash child link") + return + } + deletedNodePath = t.lookup.InternalPath(spaceID, nodeID) } - // lookup ownerType in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.OwnerTypeAttr); err == nil { - owner.Type = utils.UserTypeMap(attrStr) - } else { + + recycleNode = node.New(spaceID, nodeID, "", "", 0, "", nil, t.lookup) + recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID) + if err != nil { return } - recycleNode = node.New(trashNodeID, "", "", 0, "", owner, t.lookup) + var attrStr string // lookup blobID in extended attributes if attrStr, err = xattrs.Get(deletedNodePath, xattrs.BlobIDAttr); err == nil { recycleNode.BlobID = attrStr @@ -843,38 +848,15 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceid, key, path string) ( return } - // look up space root from the trashed node - err = recycleNode.FindStorageSpaceRoot() - - if path == "" || path == "/" { - parts := strings.SplitN(filepath.Base(link), node.TrashIDDelimiter, 2) - if len(parts) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Interface("parts", parts).Msg("malformed trash link") - return - } - // update the node id, drop the `.T.{timestamp}` suffix - recycleNode.ID = parts[0] - } - // get origin node, is relative to space root origin = "/" - deletedNodeRootPath := deletedNodePath - if path != "" && path != "/" { - trashItemRoot := filepath.Join(t.lookup.InternalRoot(), "trash", spaceid, key) - var rootLink string - rootLink, err = os.Readlink(trashItemRoot) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - deletedNodeRootPath = t.lookup.InternalPath(filepath.Base(rootLink)) - } + trashRootItemPath := filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2)) // lookup origin path in extended attributes - if attrStr, err = xattrs.Get(deletedNodeRootPath, xattrs.TrashOriginAttr); err == nil { + if attrStr, err = xattrs.Get(trashRootItemPath, xattrs.TrashOriginAttr); err == nil { origin = filepath.Join(attrStr, path) } else { - log.Error().Err(err).Str("trashItem", trashItem).Str("link", link).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") + log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read 
origin path, restoring to /") } return diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go index 09d79f072e..72e61dcdac 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree_test.go +++ b/pkg/storage/utils/decomposedfs/tree/tree_test.go @@ -23,6 +23,7 @@ import ( "path" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" @@ -106,7 +107,7 @@ var _ = Describe("Tree", func() { }) It("moves the file to the trash", func() { - trashPath := path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath := path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) _, err := os.Stat(trashPath) Expect(err).ToNot(HaveOccurred()) }) @@ -117,7 +118,7 @@ var _ = Describe("Tree", func() { }) It("sets the trash origin xattr", func() { - trashPath := path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath := path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) Expect(err).ToNot(HaveOccurred()) Expect(string(attr)).To(Equal("/dir1/file1")) @@ -136,7 +137,7 @@ var _ = Describe("Tree", func() { JustBeforeEach(func() { env.Blobstore.On("Delete", n.BlobID).Return(nil) - trashPath = path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath = path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) Expect(t.Delete(env.Ctx, n)).To(Succeed()) }) @@ -238,7 +239,7 @@ var _ = Describe("Tree", func() { ) JustBeforeEach(func() { - trashPath = path.Join(env.Root, "trash", n.SpaceRoot.ID, n.ID) + trashPath = path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) Expect(t.Delete(env.Ctx, n)).To(Succeed()) }) @@ -278,7 +279,7 @@ var _ = Describe("Tree", func() { Describe("with TreeTimeAccounting enabled", func() { It("sets the tmtime of the parent", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) perms := node.OwnerPermissions() @@ -296,7 +297,7 @@ var _ = Describe("Tree", func() { Describe("with TreeSizeAccounting enabled", func() { It("calculates the size", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) err = env.Tree.Propagate(env.Ctx, file) @@ -307,9 +308,9 @@ var _ = Describe("Tree", func() { }) It("considers all files", func() { - _, err := env.CreateTestFile("file1", "", 1, dir.ID) + _, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) - file2, err := env.CreateTestFile("file2", "", 100, dir.ID) + file2, err := env.CreateTestFile("file2", "", dir.ID, dir.SpaceID, 100) Expect(err).ToNot(HaveOccurred()) err = env.Tree.Propagate(env.Ctx, file2) @@ -325,7 +326,7 @@ var _ = Describe("Tree", func() { err = subdir.SetTreeSize(uint64(200)) Expect(err).ToNot(HaveOccurred()) - file, err := env.CreateTestFile("file1", "", 1, dir.ID) + file, err := env.CreateTestFile("file1", "", dir.ID, dir.SpaceID, 1) Expect(err).ToNot(HaveOccurred()) err = 
env.Tree.Propagate(env.Ctx, file) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index f73342ade6..617b3dae5a 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -41,6 +41,7 @@ import ( "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/utils" @@ -245,10 +246,6 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (uplo } usr := ctxpkg.ContextMustGetUser(ctx) - owner, err := p.Owner() - if err != nil { - return nil, errors.Wrap(err, "Decomposedfs: error determining owner") - } var spaceRoot string if info.Storage != nil { if spaceRoot, ok = info.Storage["SpaceRoot"]; !ok { @@ -272,9 +269,6 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (uplo "UserType": utils.UserTypeToString(usr.Id.Type), "UserName": usr.Username, - "OwnerIdp": owner.Idp, - "OwnerId": owner.OpaqueId, - "LogLevel": log.GetLevel().String(), } // Create binary file in the upload folder with no content @@ -459,7 +453,9 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return } + spaceID := upload.info.Storage["SpaceRoot"] n := node.New( + spaceID, upload.info.Storage["NodeId"], upload.info.Storage["NodeParentId"], upload.info.Storage["NodeName"], @@ -468,7 +464,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { nil, upload.fs.lu, ) - n.SpaceRoot = node.New(upload.info.Storage["SpaceRoot"], "", "", 0, "", nil, upload.fs.lu) + n.SpaceRoot = node.New(spaceID, spaceID, "", "", 0, "", nil, upload.fs.lu) // check lock if err := n.CheckLock(ctx); err != nil { @@ -556,7 +552,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { // FIXME move versioning to blobs ... no need to copy all the metadata! well ... it does if we want to version metadata... // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath = upload.fs.lu.InternalPath(n.ID + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) + versionsPath = upload.fs.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) // This move drops all metadata!!! We copy it below with CopyMetadata // FIXME the node must remain the same. otherwise we might restore share metadata @@ -589,9 +585,11 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { Msg("Decomposedfs: could not truncate") return } + if err := os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { + sublog.Warn().Err(err).Msg("Decomposedfs: could not create node dir, trying to write file anyway") + } if err = os.Rename(upload.binPath, targetPath); err != nil { - sublog.Err(err). - Msg("Decomposedfs: could not rename") + sublog.Error().Err(err).Msg("Decomposedfs: could not rename") return } if versionsPath != "" { @@ -618,16 +616,13 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { tryWritingChecksum(&sublog, n, "adler32", adler32h) // who will become the owner? the owner of the parent actually ... 
not the currently logged in user - err = n.WriteAllNodeMetadata(&userpb.UserId{ - Idp: upload.info.Storage["OwnerIdp"], - OpaqueId: upload.info.Storage["OwnerId"], - }) + err = n.WriteAllNodeMetadata() if err != nil { return errors.Wrap(err, "Decomposedfs: could not write metadata") } // link child name to parent if it is new - childNameLink := filepath.Join(upload.fs.lu.InternalPath(n.ParentID), n.Name) + childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) var link string link, err = os.Readlink(childNameLink) if err == nil && link != "../"+n.ID { @@ -642,7 +637,8 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { } } if os.IsNotExist(err) || link != "../"+n.ID { - if err = os.Symlink("../"+n.ID, childNameLink); err != nil { + relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) + if err = os.Symlink(relativeNodePath, childNameLink); err != nil { return errors.Wrap(err, "Decomposedfs: could not symlink child entry") } } diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go index 074cd68e96..70735e5052 100644 --- a/pkg/storage/utils/decomposedfs/upload_test.go +++ b/pkg/storage/utils/decomposedfs/upload_test.go @@ -26,12 +26,14 @@ import ( "os" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ruser "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" @@ -55,16 +57,17 @@ var _ = Describe("File uploads", func() { ctx context.Context spaceID string - o *options.Options - lookup *decomposedfs.Lookup - permissions *mocks.PermissionsChecker - bs *treemocks.Blobstore + o *options.Options + lu *lookup.Lookup + permissions *mocks.PermissionsChecker + cs3permissionsclient *mocks.CS3PermissionsClient + bs *treemocks.Blobstore ) BeforeEach(func() { ref = &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: "userid", + StorageId: "u-s-e-r-id", }, Path: "/foo", } @@ -72,7 +75,7 @@ var _ = Describe("File uploads", func() { user = &userpb.User{ Id: &userpb.UserId{ Idp: "idp", - OpaqueId: "userid", + OpaqueId: "u-s-e-r-id", Type: userpb.UserType_USER_TYPE_PRIMARY, }, Username: "username", @@ -80,7 +83,7 @@ var _ = Describe("File uploads", func() { rootRef = &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: "userid", + StorageId: "u-s-e-r-id", }, Path: "/", } @@ -94,8 +97,9 @@ var _ = Describe("File uploads", func() { "root": tmpRoot, }) Expect(err).ToNot(HaveOccurred()) - lookup = &decomposedfs.Lookup{Options: o} + lu = &lookup.Lookup{Options: o} permissions = &mocks.PermissionsChecker{} + cs3permissionsclient = &mocks.CS3PermissionsClient{} bs = &treemocks.Blobstore{} }) @@ -107,10 +111,12 @@ var _ = Describe("File uploads", func() { }) BeforeEach(func() { - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Times(1).Return(true, nil) + cs3permissionsclient.On("CheckPermission", mock.Anything, mock.Anything, 
mock.Anything).Return(&cs3permissions.CheckPermissionResponse{ + Status: &v1beta11.Status{Code: v1beta11.Code_CODE_OK}, + }, nil) var err error - tree := tree.New(o.Root, true, true, lookup, bs) - fs, err = decomposedfs.New(o, lookup, permissions, tree) + tree := tree.New(o.Root, true, true, lu, bs) + fs, err = decomposedfs.New(o, lu, permissions, tree, cs3permissionsclient) Expect(err).ToNot(HaveOccurred()) resp, err := fs.CreateStorageSpace(ctx, &provider.CreateStorageSpaceRequest{Owner: user, Type: "personal"}) @@ -141,7 +147,7 @@ var _ = Describe("File uploads", func() { When("the user wants to initiate a file upload", func() { It("fails", func() { - msg := "error: permission denied: userid/foo" + msg := "error: permission denied: u-s-e-r-id/foo" _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) Expect(err).To(MatchError(msg)) }) @@ -152,7 +158,7 @@ var _ = Describe("File uploads", func() { JustBeforeEach(func() { var err error // the space name attribute is the stop condition in the lookup - h, err := lookup.RootNode(ctx) + h, err := lu.NodeFromResource(ctx, rootRef) Expect(err).ToNot(HaveOccurred()) err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")) Expect(err).ToNot(HaveOccurred()) @@ -161,7 +167,7 @@ var _ = Describe("File uploads", func() { When("the user wants to initiate a file upload", func() { It("fails", func() { - msg := "error: permission denied: userid/foo" + msg := "error: permission denied: u-s-e-r-id/foo" _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) Expect(err).To(MatchError(msg)) }) diff --git a/tests/acceptance/expected-failures-on-OCIS-storage.md b/tests/acceptance/expected-failures-on-OCIS-storage.md index 089ea06169..9ec1ac111a 100644 --- a/tests/acceptance/expected-failures-on-OCIS-storage.md +++ b/tests/acceptance/expected-failures-on-OCIS-storage.md @@ -480,8 +480,6 @@ The first two tests work against ocis. 
There must be something wrong in the CI s - [apiSharePublicLink1/createPublicLinkShare.feature:375](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L375) - [apiSharePublicLink1/createPublicLinkShare.feature:376](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L376) -- [apiSharePublicLink1/createPublicLinkShare.feature:477](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L477) -- [apiSharePublicLink1/createPublicLinkShare.feature:478](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L478) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:212](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L212) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:213](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L213) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:214](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L214) diff --git a/tests/acceptance/expected-failures-on-S3NG-storage.md b/tests/acceptance/expected-failures-on-S3NG-storage.md index 1ed8ede453..164294783f 100644 --- a/tests/acceptance/expected-failures-on-S3NG-storage.md +++ b/tests/acceptance/expected-failures-on-S3NG-storage.md @@ -501,8 +501,6 @@ File and sync features in a shared scenario - [apiSharePublicLink1/createPublicLinkShare.feature:375](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L375) - [apiSharePublicLink1/createPublicLinkShare.feature:376](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L376) -- [apiSharePublicLink1/createPublicLinkShare.feature:477](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L477) -- [apiSharePublicLink1/createPublicLinkShare.feature:478](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiSharePublicLink1/createPublicLinkShare.feature#L478) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:212](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L212) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:213](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L213) - [apiShareManagementBasicToShares/deleteShareFromShares.feature:214](https://github.com/owncloud/core/blob/master/tests/acceptance/features/apiShareManagementBasicToShares/deleteShareFromShares.feature#L214) diff --git a/tests/integration/grpc/fixtures/storageprovider-ocis.toml b/tests/integration/grpc/fixtures/storageprovider-ocis.toml index 49c0dd1af5..b9012140d2 100644 --- a/tests/integration/grpc/fixtures/storageprovider-ocis.toml +++ b/tests/integration/grpc/fixtures/storageprovider-ocis.toml @@ -9,5 +9,4 @@ root = "{{root}}/storage" treetime_accounting = true treesize_accounting = true enable_home = true -userprovidersvc = 
"localhost:18000" -gateway_addr = "{{gateway_address}}" \ No newline at end of file +permissionssvc = "{{permissions_address}}" diff --git a/tests/integration/grpc/gateway_storageprovider_test.go b/tests/integration/grpc/gateway_storageprovider_test.go index 2a3abbb5a6..3cb75a8b45 100644 --- a/tests/integration/grpc/gateway_storageprovider_test.go +++ b/tests/integration/grpc/gateway_storageprovider_test.go @@ -30,13 +30,13 @@ import ( userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" storagep "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/auth/scope" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/ocis" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" + "github.com/cs3org/reva/pkg/utils" "github.com/cs3org/reva/tests/helpers" . "github.com/onsi/ginkgo/v2" @@ -177,6 +177,7 @@ var _ = Describe("gateway", func() { shard1Fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -201,6 +202,7 @@ var _ = Describe("gateway", func() { shard2Fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -285,14 +287,6 @@ var _ = Describe("gateway", func() { It("places new spaces in the correct shard", func() { createRes, err := serviceClient.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ - Opaque: &typesv1beta1.Opaque{ - Map: map[string]*typesv1beta1.OpaqueEntry{ - "path": { - Decoder: "plain", - Value: []byte("/projects"), - }, - }, - }, Owner: user, Type: "project", Name: "o - project", @@ -310,9 +304,9 @@ var _ = Describe("gateway", func() { Expect(err).ToNot(HaveOccurred()) Expect(listRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - _, err = os.Stat(path.Join(revads["storage"].StorageRoot, "/spaces/project", space.Id.OpaqueId)) + _, err = os.Stat(path.Join(revads["storage"].StorageRoot, "/spacetypes/project", space.Id.OpaqueId)) Expect(err).To(HaveOccurred()) - _, err = os.Stat(path.Join(revads["storage2"].StorageRoot, "/spaces/project", space.Id.OpaqueId)) + _, err = os.Stat(path.Join(revads["storage2"].StorageRoot, "/spacetypes/project", space.Id.OpaqueId)) Expect(err).ToNot(HaveOccurred()) }) @@ -347,12 +341,11 @@ var _ = Describe("gateway", func() { Context("with a basic user storage", func() { var ( - fs storage.FS - embeddedFs storage.FS - homeSpace *storagep.StorageSpace - embeddedSpace *storagep.StorageSpace - embeddedSpaceID string - embeddedRef *storagep.Reference + fs storage.FS + embeddedFs storage.FS + homeSpace *storagep.StorageSpace + embeddedSpace *storagep.StorageSpace + embeddedRef *storagep.Reference ) BeforeEach(func() { @@ -369,8 +362,7 @@ var _ = Describe("gateway", func() { var err error fs, err = ocis.New(map[string]interface{}{ "root": revads["storage"].StorageRoot, - "userprovidersvc": revads["users"].GrpcAddress, - "gateway_addr": revads["gateway"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, 
"treesize_accounting": true, "treetime_accounting": true, @@ -395,6 +387,7 @@ var _ = Describe("gateway", func() { embeddedFs, err = ocis.New(map[string]interface{}{ "root": revads["storage2"].StorageRoot, "userprovidersvc": revads["users"].GrpcAddress, + "permissionssvc": revads["permissions"].GrpcAddress, "enable_home": true, "treesize_accounting": true, "treetime_accounting": true, @@ -421,7 +414,6 @@ var _ = Describe("gateway", func() { []byte("22"), ) Expect(err).ToNot(HaveOccurred()) - embeddedSpaceID = embeddedSpace.Id.OpaqueId }) Describe("ListContainer", func() { @@ -480,21 +472,23 @@ var _ = Describe("gateway", func() { info := statRes.Info Expect(info.Type).To(Equal(storagep.ResourceType_RESOURCE_TYPE_CONTAINER)) - Expect(info.Path).To(Equal(user.Id.OpaqueId)) + Expect(utils.ResourceIDEqual(info.Id, homeRef.ResourceId)).To(BeTrue()) + Expect(info.Path).To(Equal("")) // path of a root node of a space is always "" Expect(info.Owner.OpaqueId).To(Equal(user.Id.OpaqueId)) // TODO: size aggregating is done by the client now - so no chance testing that here // Expect(info.Size).To(Equal(uint64(3))) // home: 1, embedded: 2 }) - It("stats the embedded space", func() { + It("stats the root of embedded space", func() { statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: embeddedRef}) Expect(err).ToNot(HaveOccurred()) Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) info := statRes.Info Expect(info.Type).To(Equal(storagep.ResourceType_RESOURCE_TYPE_CONTAINER)) - Expect(info.Path).To(Equal(embeddedSpaceID)) + Expect(utils.ResourceIDEqual(info.Id, embeddedRef.ResourceId)).To(BeTrue()) + Expect(info.Path).To(Equal("")) // path of a root node of a space is always "" Expect(info.Size).To(Equal(uint64(2))) }) diff --git a/tests/integration/grpc/grpc_suite_test.go b/tests/integration/grpc/grpc_suite_test.go index 042725da66..b9bbc7fe9a 100644 --- a/tests/integration/grpc/grpc_suite_test.go +++ b/tests/integration/grpc/grpc_suite_test.go @@ -58,7 +58,7 @@ type Revad struct { Cleanup cleanupFunc // Function to kill the process and cleanup the temp. root. If the given parameter is true the files will be kept to make debugging failures easier. } -// stardRevads takes a list of revad configuration files plus a map of +// startRevads takes a list of revad configuration files plus a map of // variables that need to be substituted in them and starts them. // // A unique port is assigned to each spawned instance. 
@@ -152,7 +152,7 @@ func startRevads(configs map[string]string, variables map[string]string) (map[st } // even the port is open the service might not be available yet - time.Sleep(1 * time.Second) + time.Sleep(2 * time.Second) revad := &Revad{ TmpRoot: tmpRoot, diff --git a/tests/integration/grpc/storageprovider_test.go b/tests/integration/grpc/storageprovider_test.go index 2679c98b19..5656619f64 100644 --- a/tests/integration/grpc/storageprovider_test.go +++ b/tests/integration/grpc/storageprovider_test.go @@ -30,6 +30,7 @@ import ( ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/fs/nextcloud" "github.com/cs3org/reva/pkg/storage/fs/ocis" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" "github.com/cs3org/reva/pkg/utils" @@ -59,11 +60,12 @@ func createFS(provider string, revads map[string]*Revad) (storage.FS, error) { switch provider { case "ocis": conf["root"] = revads["storage"].StorageRoot + conf["permissionssvc"] = revads["permissions"].GrpcAddress f = ocis.New case "nextcloud": - conf["root"] = revads["storage"].StorageRoot - conf["enable_home"] = true - f = ocis.New + conf["endpoint"] = "http://localhost:8080/apps/sciencemesh/" + conf["mock_http"] = true + f = nextcloud.New } return f(conf) } @@ -130,19 +132,19 @@ var _ = Describe("storage providers", func() { assertCreateHome := func(provider string) { It("creates a home directory", func() { homeRef := ref(provider, homePath) - _, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: homeRef}) + statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: homeRef}) Expect(err).ToNot(HaveOccurred()) - // Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_NOT_FOUND)) + Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_NOT_FOUND)) res, err := serviceClient.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ Owner: user, Type: "personal", Name: user.Id.OpaqueId, }) - Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) Expect(err).ToNot(HaveOccurred()) + Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: ref(provider, homePath)}) + statRes, err = serviceClient.Stat(ctx, &storagep.StatRequest{Ref: ref(provider, homePath)}) Expect(err).ToNot(HaveOccurred()) Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) @@ -705,8 +707,6 @@ var _ = Describe("storage providers", func() { vRef.ResourceId = &storagep.ResourceId{StorageId: user.Id.OpaqueId} } - ctx := ctxpkg.ContextSetUser(context.Background(), user) - _, err = fs.CreateStorageSpace(ctx, &storagep.CreateStorageSpaceRequest{ Owner: user, Type: "personal", @@ -729,7 +729,8 @@ var _ = Describe("storage providers", func() { }) suite("ocis", map[string]string{ - "storage": "storageprovider-ocis.toml", + "storage": "storageprovider-ocis.toml", + "permissions": "permissions-ocis-ci.toml", }) }) diff --git a/tests/oc-integration-tests/drone/storage-users-0-9.toml b/tests/oc-integration-tests/drone/storage-users-0-9.toml index 5fc9255a41..805d57d2a1 100644 --- a/tests/oc-integration-tests/drone/storage-users-0-9.toml +++ b/tests/oc-integration-tests/drone/storage-users-0-9.toml @@ -26,6 +26,7 @@ data_server_url = "http://revad-services:11001/data" root = "/drone/src/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = 
"/var/tmp/reva/tmp" root = "/drone/src/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-a-f.toml b/tests/oc-integration-tests/drone/storage-users-a-f.toml index 2a868acd93..e72c368f0e 100644 --- a/tests/oc-integration-tests/drone/storage-users-a-f.toml +++ b/tests/oc-integration-tests/drone/storage-users-a-f.toml @@ -26,6 +26,7 @@ data_server_url = "http://revad-services:11011/data" root = "/drone/src/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/drone/src/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-ocis.toml b/tests/oc-integration-tests/drone/storage-users-ocis.toml index 0cb5a88f86..ed175d2850 100644 --- a/tests/oc-integration-tests/drone/storage-users-ocis.toml +++ b/tests/oc-integration-tests/drone/storage-users-ocis.toml @@ -17,13 +17,12 @@ address = "0.0.0.0:11000" driver = "ocis" expose_data_server = true data_server_url = "http://revad-services:11001/data" -gateway_addr = "0.0.0.0:19000" [grpc.services.storageprovider.drivers.ocis] root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true -gateway_addr = "0.0.0.0:19000" +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -37,3 +36,4 @@ temp_folder = "/drone/src/tmp/reva/tmp" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/drone/storage-users-s3ng.toml b/tests/oc-integration-tests/drone/storage-users-s3ng.toml index 90d98068ae..88b707ae45 100644 --- a/tests/oc-integration-tests/drone/storage-users-s3ng.toml +++ b/tests/oc-integration-tests/drone/storage-users-s3ng.toml @@ -21,6 +21,7 @@ data_server_url = "http://revad-services:11001/data" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" "s3.endpoint" = "http://ceph:8080" "s3.region" = "default" "s3.bucket" = "test" @@ -39,6 +40,7 @@ temp_folder = "/drone/src/tmp/reva/tmp" root = "/drone/src/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" "s3.endpoint" = "http://ceph:8080" "s3.region" = "default" "s3.bucket" = "test" diff --git a/tests/oc-integration-tests/local/ldap-users.toml b/tests/oc-integration-tests/local/ldap-users.toml index 65e2d00f6a..b853c480c3 100644 --- a/tests/oc-integration-tests/local/ldap-users.toml +++ b/tests/oc-integration-tests/local/ldap-users.toml @@ -16,7 +16,7 @@ address = "0.0.0.0:18000" auth_manager = "ldap" [grpc.services.authprovider.auth_managers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" @@ -34,7 +34,7 @@ cn="cn" driver = "ldap" [grpc.services.userprovider.drivers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" @@ -57,7 +57,7 @@ gid="entryuuid" driver = "ldap" [grpc.services.groupprovider.drivers.ldap] -hostname="localhost" +hostname="ldap" port=636 insecure=true base_dn="dc=owncloud,dc=com" diff --git a/tests/oc-integration-tests/local/storage-users-0-9.toml b/tests/oc-integration-tests/local/storage-users-0-9.toml index 
89c7975c7b..bd79f5917f 100644 --- a/tests/oc-integration-tests/local/storage-users-0-9.toml +++ b/tests/oc-integration-tests/local/storage-users-0-9.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11001/data" root = "/var/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data-0-9" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/local/storage-users-a-f.toml b/tests/oc-integration-tests/local/storage-users-a-f.toml index 18f201df34..f4941fd7e2 100644 --- a/tests/oc-integration-tests/local/storage-users-a-f.toml +++ b/tests/oc-integration-tests/local/storage-users-a-f.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11011/data" root = "/var/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,3 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data-a-f" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" diff --git a/tests/oc-integration-tests/local/storage-users.toml b/tests/oc-integration-tests/local/storage-users.toml index 350cd8ef6e..0c50bcb3d8 100644 --- a/tests/oc-integration-tests/local/storage-users.toml +++ b/tests/oc-integration-tests/local/storage-users.toml @@ -26,6 +26,7 @@ data_server_url = "http://localhost:11001/data" root = "/var/tmp/reva/data" treetime_accounting = true treesize_accounting = true +permissionssvc = "localhost:10000" # we have a locally running dataprovider [http] @@ -39,4 +40,4 @@ temp_folder = "/var/tmp/reva/tmp" root = "/var/tmp/reva/data" treetime_accounting = true treesize_accounting = true -gateway_addr = "0.0.0.0:19000" +permissionssvc = "localhost:10000" From 2fde139dfb192093cb66121e7347cb207e2ab14d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Thu, 24 Feb 2022 16:47:15 +0000 Subject: [PATCH 2/7] fix space disable/reenable/purge MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/grants.go | 2 +- pkg/storage/utils/decomposedfs/metadata.go | 18 +- pkg/storage/utils/decomposedfs/node/node.go | 55 +++++- .../utils/decomposedfs/node/node_test.go | 3 +- pkg/storage/utils/decomposedfs/spaces.go | 165 +++++------------- pkg/storage/utils/decomposedfs/tree/tree.go | 4 +- .../utils/decomposedfs/xattrs/xattrs.go | 14 +- 7 files changed, 114 insertions(+), 147 deletions(-) diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 40e6199500..f80a0d0344 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -170,7 +170,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference attr = xattrs.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId } - if err = xattr.Remove(node.InternalPath(), attr); err != nil { + if err = xattrs.Remove(node.InternalPath(), attr); err != nil { return } diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go index f45c9ea216..39499a6c5b 100644 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -22,7 +22,6 @@ import ( "context" "fmt" 
"path/filepath" - "syscall" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -181,8 +180,8 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide continue } fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(nodePath, fa); err != nil { - if isNoData(err) { + if err := xattrs.Remove(nodePath, fa); err != nil { + if xattrs.IsAttrUnset(err) { // TODO align with default case: is there a difference between darwin and linux? // refactor this properly into a function in the "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" package continue // already gone, ignore @@ -194,7 +193,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) } default: - if err = xattr.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { + if err = xattrs.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { // a non-existing attribute will return an error, which we can ignore // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || @@ -220,14 +219,3 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide return errors.New("multiple errors occurred, see log for details") } } - -// The os ENODATA error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isNoData(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index 56ee9dc3ec..ec1b2dda29 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -138,7 +138,7 @@ func (n *Node) SetMetadata(key string, val string) (err error) { // RemoveMetadata removes a given key func (n *Node) RemoveMetadata(key string) (err error) { - if err = xattr.Remove(n.InternalPath(), key); err != nil { + if err = xattrs.Remove(n.InternalPath(), key); err != nil { if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || // darwin e.Err.Error() == "attribute not found") { @@ -187,6 +187,7 @@ func (n *Node) WriteOwner(owner *userpb.UserId) error { } // ReadNode creates a new instance from an id and checks if it exists +// FIXME check if user is allowed to access disabled spaces func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *Node, err error) { // read space root @@ -279,6 +280,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *No // - no need to write an additional trash entry // - can be made more robust with a journal // - same recursion mechanism can be used to purge items? 
sth we still need to do + // - flag the two above options with dtime _, err = os.Stat(n.ParentInternalPath()) if err != nil { if os.IsNotExist(err) { @@ -835,11 +837,49 @@ func (n *Node) GetTMTime() (tmTime time.Time, err error) { return time.Parse(time.RFC3339Nano, b) } -// SetTMTime writes the tmtime to the extended attributes -func (n *Node) SetTMTime(t time.Time) (err error) { +// SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed +func (n *Node) SetTMTime(t *time.Time) (err error) { + if t == nil { + err = xattrs.Remove(n.InternalPath(), xattrs.TreeMTimeAttr) + if xattrs.IsAttrUnset(err) { + return nil + } + return err + } return xattrs.Set(n.InternalPath(), xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) } +// GetDTime reads the dtime from the extended attributes +func (n *Node) GetDTime() (tmTime time.Time, err error) { + var b string + if b, err = xattrs.Get(n.InternalPath(), xattrs.DTimeAttr); err != nil { + return + } + return time.Parse(time.RFC3339Nano, b) +} + +// SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed +func (n *Node) SetDTime(t *time.Time) (err error) { + if t == nil { + err = xattrs.Remove(n.InternalPath(), xattrs.DTimeAttr) + if xattrs.IsAttrUnset(err) { + return nil + } + return err + } + return xattrs.Set(n.InternalPath(), xattrs.DTimeAttr, t.UTC().Format(time.RFC3339Nano)) +} + +// IsDisabled returns true when the node has a dmtime attribute set +// only used to check if a space is disabled +// FIXME confusing with the trash logic +func (n *Node) IsDisabled() bool { + if _, err := n.GetDTime(); err == nil { + return true + } + return false +} + // GetTreeSize reads the treesize from the extended attributes func (n *Node) GetTreeSize() (treesize uint64, err error) { var b string @@ -861,12 +901,9 @@ func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { // UnsetTempEtag removes the temporary etag attribute func (n *Node) UnsetTempEtag() (err error) { - if err = xattr.Remove(n.InternalPath(), xattrs.TmpEtagAttr); err != nil { - if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - return nil - } + err = xattrs.Remove(n.InternalPath(), xattrs.TmpEtagAttr) + if xattrs.IsAttrUnset(err) { + return nil } return err } diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go index d8befdc85e..5b1582f5be 100644 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -196,7 +196,8 @@ var _ = Describe("Node", func() { Expect(len(ri.Etag)).To(Equal(34)) before := ri.Etag - Expect(n.SetTMTime(time.Now().UTC())).To(Succeed()) + tmtime := time.Now() + Expect(n.SetTMTime(&tmtime)).To(Succeed()) ri, err = n.AsResourceInfo(env.Ctx, &perms, []string{}, false) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 9fb6b9f028..6b8c158c60 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -104,28 +104,31 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr } } - // always enable propagation on the storage space root - // mark the space root node as the end of propagation - if err = root.SetMetadata(xattrs.PropagationAttr, "1"); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", 
root).Msg("could not mark space root node to propagate") - return nil, err - } - err = fs.linkStorageSpaceType(ctx, req.Type, root.ID) if err != nil { return nil, err } metadata := make(map[string]string, 3) + + // always enable propagation on the storage space root + // mark the space root node as the end of propagation + metadata[xattrs.PropagationAttr] = "1" + metadata[xattrs.SpaceNameAttr] = req.Name + + if req.Type != "" { + metadata[xattrs.SpaceTypeAttr] = req.Type + } + if q := req.GetQuota(); q != nil { // set default space quota metadata[xattrs.QuotaAttr] = strconv.FormatUint(q.QuotaMaxBytes, 10) } - metadata[xattrs.SpaceNameAttr] = req.Name if description != "" { metadata[xattrs.SpaceDescriptionAttr] = description } + if err := xattrs.SetMultiple(root.InternalPath(), metadata); err != nil { return nil, err } @@ -152,7 +155,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr } } - space, err := fs.storageSpaceFromNode(ctx, root, req.Type, root.InternalPath(), false) + space, err := fs.storageSpaceFromNode(ctx, root, root.InternalPath(), false) if err != nil { return nil, err } @@ -213,6 +216,7 @@ func readSpaceAndNodeFromSpaceTypeLink(path string) (string, string, error) { return "", "", err } // ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51 + // ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51.T.2022-02-24T12:35:18.196484592Z // TODO use filepath.Separator to support windows link = strings.ReplaceAll(link, "/", "") // ....spaces4c510ada-c86b-4815-8820-42cdf82c3d51nodes4c510ada-c86b-4815-8820-42cdf82c3d51 @@ -285,7 +289,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // return empty list return spaces, nil } - space, err := fs.storageSpaceFromNode(ctx, n, spaceTypeAny, n.InternalPath(), canListAllSpaces) + space, err := fs.storageSpaceFromNode(ctx, n, n.InternalPath(), canListAllSpaces) if err != nil { return nil, err } @@ -347,7 +351,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide } // TODO apply more filters - space, err := fs.storageSpaceFromNode(ctx, n, spaceType, matches[i], canListAllSpaces) + space, err := fs.storageSpaceFromNode(ctx, n, matches[i], canListAllSpaces) if err != nil { if _, ok := err.(errtypes.IsPermissionDenied); !ok { appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not convert to storage space") @@ -365,7 +369,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide return nil, err } if n.Exists { - space, err := fs.storageSpaceFromNode(ctx, n, "*", n.InternalPath(), canListAllSpaces) + space, err := fs.storageSpaceFromNode(ctx, n, n.InternalPath(), canListAllSpaces) if err != nil { return nil, err } @@ -385,50 +389,19 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } space := req.StorageSpace - spaceID, nodeID, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) - - if restore { - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, nodeID)) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_NOT_FOUND, - Message: fmt.Sprintf("restoring space failed: found %d matching spaces", len(matches)), - }, - }, nil - - } - - target, err := os.Readlink(matches[0]) - if err != nil { - 
appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - nodeID := strings.TrimLeft(target, "/.") - nodeID = strings.ReplaceAll(nodeID, "/", "") - n, err := node.ReadNode(ctx, fs.lu, spaceID, nodeID) - if err != nil { - return nil, err - } - - newnode := *n - newnode.Name = strings.Split(n.Name, node.TrashIDDelimiter)[0] - newnode.Exists = false - - err = fs.tp.Move(ctx, n, &newnode) - if err != nil { - return nil, err - } - } + spaceID, _, _ := utils.SplitStorageSpaceID(space.Id.OpaqueId) node, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID) if err != nil { return nil, err } + if restore { + if err := node.SetDTime(nil); err != nil { + return nil, err + } + } + u, ok := ctxpkg.ContextGetUser(ctx) if !ok { return nil, fmt.Errorf("decomposedfs: spaces: contextual user not found") @@ -495,7 +468,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } // send back the updated data from the storage - updatedSpace, err := fs.storageSpaceFromNode(ctx, node, "*", node.InternalPath(), false) + updatedSpace, err := fs.storageSpaceFromNode(ctx, node, node.InternalPath(), false) if err != nil { return nil, err } @@ -522,69 +495,33 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De } if purge { - if !strings.Contains(n.Name, node.TrashIDDelimiter) { + if !n.IsDisabled() { return errtypes.NewErrtypeFromStatus(status.NewInvalidArg(ctx, "can't purge enabled space")) } - ip := n.InternalPath() - matches, err := filepath.Glob(ip) - if err != nil { - return err - } - - // TODO: remove blobs - if err := os.RemoveAll(matches[0]); err != nil { - return err - } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, req.Id.OpaqueId)) + spaceType, err := n.GetMetadata(xattrs.SpaceTypeAttr) if err != nil { return err } - if len(matches) != 1 { - return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) - } - - if err := os.RemoveAll(matches[0]); err != nil { + // remove type index + spaceTypePath := filepath.Join(fs.o.Root, "spacetypes", spaceType, spaceID) + if err := os.Remove(spaceTypePath); err != nil { return err } - matches, err = filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceID, "nodes", node.RootID, req.Id.OpaqueId+node.TrashIDDelimiter+"*")) - if err != nil { + // remove space metadata + if err := os.RemoveAll(filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2))); err != nil { return err } - if len(matches) != 1 { - return fmt.Errorf("delete root node failed: found %d matching root nodes", len(matches)) - } - return os.RemoveAll(matches[0]) - } - - // don't delete - just rename - dn := *n - deletionTime := time.Now().UTC().Format(time.RFC3339Nano) - dn.Name = n.Name + node.TrashIDDelimiter + deletionTime - dn.Exists = false - err = fs.tp.Move(ctx, n, &dn) - if err != nil { - return err - } - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spacetypes", spaceTypeAny, spaceID)) - if err != nil { - return err - } - - if len(matches) != 1 { - return fmt.Errorf("delete space failed: found %d matching spaces", len(matches)) - } + // FIXME remove space blobs - err = os.RemoveAll(matches[0]) - if err != nil { - return err + return nil } - trashPath := dn.InternalPath() - np := filepath.Join(filepath.Dir(matches[0]), filepath.Base(trashPath)) - return os.Symlink(trashPath, np) + // mark as disabled by writing a dtime attribute + dtime := time.Now() + return n.SetDTime(&dtime) } func (fs *Decomposedfs) 
linkStorageSpaceType(ctx context.Context, spaceType string, spaceID string) error { @@ -609,7 +546,7 @@ func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType stri return err } -func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, spaceType, nodePath string, canListAllSpaces bool) (*provider.StorageSpace, error) { +func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, nodePath string, canListAllSpaces bool) (*provider.StorageSpace, error) { user := ctxpkg.ContextMustGetUser(ctx) if !canListAllSpaces { ok, err := node.NewPermissions(fs.lu).HasPermission(ctx, n, func(p *provider.ResourcePermissions) bool { @@ -619,7 +556,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, errtypes.PermissionDenied(fmt.Sprintf("user %s is not allowed to Stat the space %s", user.Username, n.ID)) } - if strings.Contains(n.Name, node.TrashIDDelimiter) { + if n.IsDisabled() { ok, err := node.NewPermissions(fs.lu).HasPermission(ctx, n, func(p *provider.ResourcePermissions) bool { // TODO: Which permission do I need to see the space? return p.AddGrant @@ -642,18 +579,6 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, err } - glob := filepath.Join(fs.o.Root, "spacetypes", spaceType, n.SpaceRoot.ID) - matches, err := filepath.Glob(glob) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return nil, errtypes.InternalError("expected only one match for " + glob) - } - - spaceType = filepath.Base(filepath.Dir(matches[0])) - grants, err := n.ListGrants(ctx) if err != nil { return nil, err @@ -687,17 +612,21 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, }, }, }, - Id: &provider.StorageSpaceId{OpaqueId: n.SpaceRoot.ID}, + Id: &provider.StorageSpaceId{OpaqueId: n.SpaceRoot.SpaceID}, Root: &provider.ResourceId{ - StorageId: n.SpaceRoot.ID, + StorageId: n.SpaceRoot.SpaceID, OpaqueId: n.SpaceRoot.ID, }, - Name: sname, - SpaceType: spaceType, + Name: sname, + // SpaceType is read from xattr below // Mtime is set either as node.tmtime or as fi.mtime below } - if strings.Contains(n.Name, node.TrashIDDelimiter) { + if space.SpaceType, err = n.GetMetadata(xattrs.SpaceTypeAttr); err != nil { + appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute") + } + + if n.IsDisabled() { space.Opaque.Map["trashed"] = &types.OpaqueEntry{ Decoder: "plain", Value: []byte("trashed"), diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 10e6e05725..dba26171b3 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -146,7 +146,7 @@ func (t *Tree) moveNode(spaceID, nodeID string) error { } for _, child := range children { old := filepath.Join(t.root, "nodes", child.Name()) - new := filepath.Join(t.root, "spaces", spaceID, "nodes", child.Name()) + new := filepath.Join(t.root, "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(child.Name(), 4, 2)) if err := os.Rename(old, new); err != nil { logger.New().Error().Err(err). Str("space", spaceID). 
@@ -624,7 +624,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { if updateSyncTime { // update the tree time of the parent node - if err = n.SetTMTime(sTime); err != nil { + if err = n.SetTMTime(&sTime); err != nil { sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") } else { sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go index 49f8e86fa0..36f5a506c9 100644 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go +++ b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go @@ -76,6 +76,11 @@ const ( // stored as a readable time.RFC3339Nano TreeMTimeAttr string = OcisPrefix + "tmtime" + // the deletion/disabled time of a space or node + // used to mark space roots as disabled + // stored as a readable time.RFC3339Nano + DTimeAttr string = OcisPrefix + "dtime" + // the size of the tree below this node, // propagated when treesize_accounting is true and // user.ocis.propagation=1 is set @@ -87,6 +92,7 @@ const ( // the name given to a storage space. It should not contain any semantics as its only purpose is to be read. SpaceNameAttr string = OcisPrefix + "space.name" + SpaceTypeAttr string = OcisPrefix + "space.type" SpaceDescriptionAttr string = OcisPrefix + "space.description" SpaceReadmeAttr string = OcisPrefix + "space.readme" SpaceImageAttr string = OcisPrefix + "space.image" @@ -185,13 +191,19 @@ func CopyMetadata(src, target string, filter func(attributeName string) bool) (e // No file locking is involved here as writing a single xattr is // considered to be atomic. func Set(filePath string, key string, val string) error { - if err := xattr.Set(filePath, key, []byte(val)); err != nil { return err } return nil } +// Remove an extended attribute key +// No file locking is involved here as writing a single xattr is +// considered to be atomic. 
+func Remove(filePath string, key string) error { + return xattr.Remove(filePath, key) +} + // SetMultiple allows setting multiple key value pairs at once // the changes are protected with an file lock // If the file lock can not be acquired the function returns a From f1e9c5a90b67e7f944e5c8f7e896f5a0426d75c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Fri, 25 Feb 2022 07:57:04 +0000 Subject: [PATCH 3/7] fix share spaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/node/node.go | 42 +++++++++++---------- pkg/storage/utils/decomposedfs/spaces.go | 21 +++++++---- 2 files changed, 36 insertions(+), 27 deletions(-) diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index ec1b2dda29..14048273a7 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -200,8 +200,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *No r.owner, err = r.readOwner() switch { case xattrs.IsNotExist(err): - // this returns tr - return r, nil + return r, nil // swallow not found, the node defaults to exists = false case err != nil: return nil, err } @@ -222,40 +221,43 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *No nodePath := n.InternalPath() - var attr string // lookup name in extended attributes - if attr, err = xattrs.Get(nodePath, xattrs.NameAttr); err == nil { - n.Name = attr - } else { - return + n.Name, err = xattrs.Get(nodePath, xattrs.NameAttr) + switch { + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err } n.Exists = true // lookup blobID in extended attributes - if attr, err = xattrs.Get(nodePath, xattrs.BlobIDAttr); err == nil { - n.BlobID = attr - } else { - return + n.BlobID, err = xattrs.Get(nodePath, xattrs.BlobIDAttr) + switch { + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err } + // Lookup blobsize - var blobSize int64 - if blobSize, err = ReadBlobSizeAttr(nodePath); err == nil { - n.Blobsize = blobSize - } else { - return + n.Blobsize, err = ReadBlobSizeAttr(nodePath) + switch { + case xattrs.IsNotExist(err): + return n, nil // swallow not found, the node defaults to exists = false + case err != nil: + return nil, err } // lookup parent id in extended attributes - attr, err = xattrs.Get(nodePath, xattrs.ParentidAttr) + n.ParentID, err = xattrs.Get(nodePath, xattrs.ParentidAttr) switch { - case err == nil: - n.ParentID = attr case xattrs.IsAttrUnset(err): return nil, errtypes.InternalError(err.Error()) case xattrs.IsNotExist(err): return n, nil // swallow not found, the node defaults to exists = false - default: + case err != nil: return nil, errtypes.InternalError(err.Error()) } diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 6b8c158c60..96cd7f3fa9 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -341,6 +341,10 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide continue } + if !n.Exists { + continue + } + spaceType := filepath.Base(filepath.Dir(matches[i])) // FIXME type share evolved to grant on the edge branch ... 
make it configurable if the driver should support them or not for now ... ignore type share @@ -556,7 +560,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, errtypes.PermissionDenied(fmt.Sprintf("user %s is not allowed to Stat the space %s", user.Username, n.ID)) } - if n.IsDisabled() { + if n.SpaceRoot.IsDisabled() { ok, err := node.NewPermissions(fs.lu).HasPermission(ctx, n, func(p *provider.ResourcePermissions) bool { // TODO: Which permission do I need to see the space? return p.AddGrant @@ -570,15 +574,18 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, var err error // TODO apply more filters var sname string - if sname, err = n.GetMetadata(xattrs.SpaceNameAttr); err != nil { + if sname, err = n.SpaceRoot.GetMetadata(xattrs.SpaceNameAttr); err != nil { // FIXME: Is that a severe problem? appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute") } - if err := n.FindStorageSpaceRoot(); err != nil { - return nil, err - } + /* + if err := n.FindStorageSpaceRoot(); err != nil { + return nil, err + } + */ + // read the grants from the current node, not the root grants, err := n.ListGrants(ctx) if err != nil { return nil, err @@ -622,11 +629,11 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, // Mtime is set either as node.tmtime or as fi.mtime below } - if space.SpaceType, err = n.GetMetadata(xattrs.SpaceTypeAttr); err != nil { + if space.SpaceType, err = n.SpaceRoot.GetMetadata(xattrs.SpaceTypeAttr); err != nil { appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute") } - if n.IsDisabled() { + if n.SpaceRoot.IsDisabled() { space.Opaque.Map["trashed"] = &types.OpaqueEntry{ Decoder: "plain", Value: []byte("trashed"), From 68ba2f8c072d01e87bed9b73f4ffcd0c16310510 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Fri, 25 Feb 2022 08:15:21 +0000 Subject: [PATCH 4/7] linter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/spaces.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 96cd7f3fa9..6dcc3cc0bb 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -267,7 +267,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide } } if len(spaceTypes) == 0 { - spaceTypes = []string{"*"} + spaceTypes = []string{spaceTypeAny} } canListAllSpaces := fs.canListAllSpaces(ctx) @@ -295,7 +295,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide } // filter space types for _, spaceType := range spaceTypes { - if spaceType == "*" || spaceType == space.SpaceType { + if spaceType == spaceTypeAny || spaceType == space.SpaceType { spaces = append(spaces, space) } } From c632c8e1fa80307261986cd8337fb9350efcc64c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Fri, 25 Feb 2022 13:10:41 +0000 Subject: [PATCH 5/7] decomposedfs: check grants on space root MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/node/permissions.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/storage/utils/decomposedfs/node/permissions.go 
b/pkg/storage/utils/decomposedfs/node/permissions.go index b70124c74e..fdc31e8054 100644 --- a/pkg/storage/utils/decomposedfs/node/permissions.go +++ b/pkg/storage/utils/decomposedfs/node/permissions.go @@ -138,6 +138,13 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap prov } } + // for the root node + if np, err := cn.ReadUserPermissions(ctx, u); err == nil { + AddPermissions(&ap, &np) + } else { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn.ID).Msg("error reading root node permissions") + } + appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n.ID).Interface("user", u).Msg("returning agregated permissions") return ap, nil } From 69b0f36cdfbc34678e678bc9cf67b73841372529 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Mon, 28 Feb 2022 15:05:44 +0000 Subject: [PATCH 6/7] update not exists error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/metadata.go | 17 ++++++----------- pkg/storage/utils/decomposedfs/node/locks.go | 7 ++++--- pkg/storage/utils/decomposedfs/node/node.go | 12 ++++-------- pkg/storage/utils/decomposedfs/recycle.go | 6 +++--- pkg/storage/utils/decomposedfs/revisions.go | 3 ++- pkg/storage/utils/decomposedfs/tree/tree.go | 6 ++++-- pkg/storage/utils/decomposedfs/upload.go | 15 ++++++++------- 7 files changed, 31 insertions(+), 35 deletions(-) diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go index 39499a6c5b..c7b7edaf0c 100644 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -182,8 +182,6 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) if err := xattrs.Remove(nodePath, fa); err != nil { if xattrs.IsAttrUnset(err) { - // TODO align with default case: is there a difference between darwin and linux? - // refactor this properly into a function in the "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" package continue // already gone, ignore } sublog.Error().Err(err). @@ -194,16 +192,13 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide } default: if err = xattrs.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - sublog.Error().Err(err). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) + if xattrs.IsAttrUnset(err) { + continue // already gone, ignore } + sublog.Error().Err(err). + Str("key", k). 
+ Msg("could not unset metadata") + errs = append(errs, errors.Wrap(err, "could not unset metadata")) } } } diff --git a/pkg/storage/utils/decomposedfs/node/locks.go b/pkg/storage/utils/decomposedfs/node/locks.go index 4b882e1ef5..c0ada2330c 100644 --- a/pkg/storage/utils/decomposedfs/node/locks.go +++ b/pkg/storage/utils/decomposedfs/node/locks.go @@ -21,6 +21,7 @@ package node import ( "context" "encoding/json" + "io/fs" "os" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -99,7 +100,7 @@ func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { f, err := os.Open(n.LockFilePath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound("no lock found") } return nil, errors.Wrap(err, "Decomposedfs: could not open lock file") @@ -134,7 +135,7 @@ func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { f, err := os.OpenFile(n.LockFilePath(), os.O_RDWR, os.ModeExclusive) switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return errtypes.PreconditionFailed("lock does not exist") case err != nil: return errors.Wrap(err, "Decomposedfs: could not open lock file") @@ -187,7 +188,7 @@ func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error { f, err := os.OpenFile(n.LockFilePath(), os.O_RDONLY, os.ModeExclusive) switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return errtypes.PreconditionFailed("lock does not exist") case err != nil: return errors.Wrap(err, "Decomposedfs: could not open lock file") diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index 14048273a7..92570c5e35 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -138,12 +138,8 @@ func (n *Node) SetMetadata(key string, val string) (err error) { // RemoveMetadata removes a given key func (n *Node) RemoveMetadata(key string) (err error) { - if err = xattrs.Remove(n.InternalPath(), key); err != nil { - if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - return nil - } + if err = xattrs.Remove(n.InternalPath(), key); err == nil || xattrs.IsAttrUnset(err) { + return nil } return err } @@ -285,7 +281,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string) (n *No // - flag the two above options with dtime _, err = os.Stat(n.ParentInternalPath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(err.Error()) } return nil, err @@ -324,7 +320,7 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) { } nodeID, err := readChildNodeFromLink(filepath.Join(n.InternalPath(), name)) if err != nil { - if os.IsNotExist(err) || isNotDir(err) { + if errors.Is(err, fs.ErrNotExist) || isNotDir(err) { c := &Node{ SpaceID: spaceID, diff --git a/pkg/storage/utils/decomposedfs/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go index 2be8e13695..fbf5b093b7 100644 --- a/pkg/storage/utils/decomposedfs/recycle.go +++ b/pkg/storage/utils/decomposedfs/recycle.go @@ -20,7 +20,7 @@ package decomposedfs import ( "context" - "io/fs" + iofs "io/fs" "os" "path/filepath" "strings" @@ -109,7 +109,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference f, err := os.Open(trashItemPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, iofs.ErrNotExist) { return items, 
nil } return nil, errors.Wrapf(err, "recycle: error opening trashItemPath %s", trashItemPath) @@ -152,7 +152,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference return items, nil } -func (fs *Decomposedfs) createTrashItem(ctx context.Context, md fs.FileInfo, key string, deletionTime *types.Timestamp) (*provider.RecycleItem, error) { +func (fs *Decomposedfs) createTrashItem(ctx context.Context, md iofs.FileInfo, key string, deletionTime *types.Timestamp) (*provider.RecycleItem, error) { item := &provider.RecycleItem{ Type: getResourceType(md.IsDir()), diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go index 912122566d..91cf568e06 100644 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -21,6 +21,7 @@ package decomposedfs import ( "context" "io" + iofs "io/fs" "os" "path/filepath" "strings" @@ -132,7 +133,7 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe r, err := os.Open(contentPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, iofs.ErrNotExist) { return nil, errtypes.NotFound(contentPath) } return nil, errors.Wrap(err, "Decomposedfs: error opening revision "+revisionKey) diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index dba26171b3..a7f57c9266 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "io/fs" "os" "path/filepath" "strconv" @@ -195,6 +196,7 @@ func (t *Tree) linkSpace(spaceType, spaceID string) { } } +// isRootNode checks if a node is a space root func isRootNode(nodePath string) bool { attr, err := xattrs.Get(nodePath, xattrs.ParentidAttr) return err == nil && attr == node.RootID @@ -217,7 +219,7 @@ func isSharedNode(nodePath string) bool { func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) { md, err := os.Stat(n.InternalPath()) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(n.ID) } return nil, errors.Wrap(err, "tree: error stating "+n.ID) @@ -350,7 +352,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro dir := n.InternalPath() f, err := os.Open(dir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(dir) } return nil, errors.Wrap(err, "tree: error listing "+dir) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index 617b3dae5a..a1d5a2f7a6 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -28,6 +28,7 @@ import ( "hash" "hash/adler32" "io" + iofs "io/fs" "io/ioutil" "os" "path/filepath" @@ -307,7 +308,7 @@ func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, info := tusd.FileInfo{} data, err := ioutil.ReadFile(infoPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, iofs.ErrNotExist) { // Interpret os.ErrNotExist as 404 Not Found err = tusd.ErrNotFound } @@ -636,7 +637,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return errors.Wrap(err, "Decomposedfs: could not remove symlink child entry") } } - if os.IsNotExist(err) || link != "../"+n.ID { + if errors.Is(err, iofs.ErrNotExist) || link != "../"+n.ID { relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 
2)) if err = os.Symlink(relativeNodePath, childNameLink); err != nil { return errors.Wrap(err, "Decomposedfs: could not symlink child entry") @@ -645,7 +646,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { // only delete the upload if it was successfully written to the storage if err = os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { sublog.Err(err).Msg("Decomposedfs: could not delete upload info") return } @@ -683,13 +684,13 @@ func tryWritingChecksum(log *zerolog.Logger, n *node.Node, algo string, h hash.H func (upload *fileUpload) discardChunk() { if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") return } } if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("infoPath", upload.infoPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk info") return } @@ -708,12 +709,12 @@ func (fs *Decomposedfs) AsTerminatableUpload(upload tusd.Upload) tusd.Terminatab // Terminate terminates the upload func (upload *fileUpload) Terminate(ctx context.Context) error { if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { return err } } if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, iofs.ErrNotExist) { return err } } From b55bae1e153d368a059043f5425179da999ea66c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Friedrich=20Dreyer?= Date: Mon, 28 Feb 2022 16:22:08 +0000 Subject: [PATCH 7/7] fix locks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörn Friedrich Dreyer --- pkg/storage/utils/decomposedfs/node/locks.go | 42 +++++++++++++++----- pkg/storage/utils/decomposedfs/node/node.go | 2 +- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/pkg/storage/utils/decomposedfs/node/locks.go b/pkg/storage/utils/decomposedfs/node/locks.go index c0ada2330c..afb0443e72 100644 --- a/pkg/storage/utils/decomposedfs/node/locks.go +++ b/pkg/storage/utils/decomposedfs/node/locks.go @@ -23,6 +23,7 @@ import ( "encoding/json" "io/fs" "os" + "path/filepath" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" @@ -36,7 +37,7 @@ import ( // SetLock sets a lock on the node func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { - nodepath := n.LockFilePath() + lockFilePath := n.LockFilePath() // check existing lock if l, _ := n.ReadLock(ctx); l != nil { @@ -45,13 +46,17 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { return errtypes.Locked(l.LockId) } - err := os.Remove(n.LockFilePath()) + err := os.Remove(lockFilePath) if err != nil { return err } } - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(lockFilePath), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -67,7 +72,7 @@ func (n *Node) SetLock(ctx 
context.Context, lock *provider.Lock) error { }() // O_EXCL to make open fail when the file already exists - f, err := os.OpenFile(nodepath, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) + f, err := os.OpenFile(lockFilePath, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { return errors.Wrap(err, "Decomposedfs: could not create lock file") } @@ -83,7 +88,11 @@ func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) error { // ReadLock reads the lock id for a node func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { - fileLock, err := filelocks.AcquireReadLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireReadLock(n.InternalPath()) if err != nil { return nil, err @@ -118,7 +127,11 @@ func (n Node) ReadLock(ctx context.Context) (*provider.Lock, error) { // RefreshLock refreshes the node's lock func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -171,7 +184,11 @@ func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock) error { // Unlock unlocks the node func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error { - fileLock, err := filelocks.AcquireWriteLock(n.LockFilePath()) + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { return err @@ -236,8 +253,13 @@ func (n *Node) CheckLock(ctx context.Context) error { return nil // ok } -func readLocksIntoOpaque(ctx context.Context, lockPath string, ri *provider.ResourceInfo) error { - fileLock, err := filelocks.AcquireReadLock(lockPath) +func readLocksIntoOpaque(ctx context.Context, n *Node, ri *provider.ResourceInfo) error { + + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock") + } + fileLock, err := filelocks.AcquireReadLock(n.InternalPath()) if err != nil { return err @@ -252,7 +274,7 @@ func readLocksIntoOpaque(ctx context.Context, lockPath string, ri *provider.Reso } }() - f, err := os.Open(lockPath) + f, err := os.Open(n.LockFilePath()) if err != nil { appctx.GetLogger(ctx).Error().Err(err).Msg("Decomposedfs: could not open lock file") return err diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index 92570c5e35..f5febee408 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -684,7 +684,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi // read locks if _, ok := mdKeysMap[LockdiscoveryKey]; returnAllKeys || ok { if n.hasLocks(ctx) { - err = readLocksIntoOpaque(ctx, n.LockFilePath(), ri) + err = readLocksIntoOpaque(ctx, n, ri) if err != nil { sublog.Debug().Err(errtypes.InternalError("lockfail")) }
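Note on the sharded on-disk layout used throughout the patches above: both the space id and the node id are split into nested directories, e.g. spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51, via lookup.Pathify(spaceID, 1, 2) and lookup.Pathify(nodeID, 4, 2). The standalone Go sketch below only illustrates the splitting scheme those calls appear to follow; it is not the actual lookup package implementation (which is not part of this diff), and the storage root used in main is hypothetical.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathify splits the first depth*width characters of id into depth
// directory segments of width characters each and appends the rest.
// pathify("4c510ada-c86b-4815-8820-42cdf82c3d51", 4, 2)
// yields "4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51".
func pathify(id string, depth, width int) string {
	var b strings.Builder
	i := 0
	for ; i < depth*width && i+width <= len(id); i += width {
		b.WriteString(id[i : i+width])
		b.WriteByte('/')
	}
	b.WriteString(id[i:])
	return b.String()
}

// nodePath assembles the sharded location of a node inside its space,
// mirroring the "spaces/<pfx>/<space id>/nodes/<sharded node id>" layout
// introduced by this patch series. root is a hypothetical storage root.
func nodePath(root, spaceID, nodeID string) string {
	return filepath.Join(root,
		"spaces", pathify(spaceID, 1, 2),
		"nodes", pathify(nodeID, 4, 2))
}

func main() {
	spaceID := "4c510ada-c86b-4815-8820-42cdf82c3d51"
	// for a space root the node id equals the space id,
	// which is why ReadNode(ctx, lu, spaceID, spaceID) resolves the root above
	fmt.Println(nodePath("/var/tmp/reva/data", spaceID, spaceID))
	// /var/tmp/reva/data/spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51
}

For a space root the printed path matches the link target documented in the readSpaceAndNodeFromSpaceTypeLink comment, prefixed with the storage root; purging a disabled space can therefore remove the whole spaces/<pathified space id> directory in one RemoveAll, as DeleteStorageSpace does above.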