Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Retention Policy #237

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ Please check the [releases](https://github.com/mochidev/CodableDatastore/release
dependencies: [
.package(
url: "https://github.com/mochidev/CodableDatastore.git",
.upToNextMinor(from: "0.3.4")
.upToNextMinor(from: "0.4.0")
),
],
...
Expand Down Expand Up @@ -261,8 +261,7 @@ Note that in the example above, even though the author is persisted first, if an

As this project matures towards release, the project will focus on the functionality and work listed below:
- Force migration methods
- Composite indexes (via macros?)
- Cleaning up old resources on disk
- Composite indexes
- Ranged deletes
- Controls for the edit history
- Helper types to use with SwiftUI/Observability/Combine that can make data available on the main actor and filter and stay up to date
Expand All @@ -271,7 +270,7 @@ As this project matures towards release, the project will focus on the functiona
- An example app
- A memory persistence useful for testing apps with
- A pre-configured data store tuned to storing pure Data, useful for types like Images
- Cleaning up memory leaks
- Cleaning up memory and file descriptor leaks

The above list will be kept up to date during development and will likely see additions during that process.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,15 @@ extension DatastoreIndexManifest {
}
}

extension DatastoreIndexManifest {
    /// The identifiers of the pages that should be pruned from this manifest for the given prune mode.
    /// - Parameter mode: Whether the caller is pruning pages that were removed, or ones that were added.
    /// - Returns: The matching page identifiers as a set.
    func pagesToPrune(for mode: SnapshotPruneMode) -> Set<DatastorePageIdentifier> {
        let prunablePageIDs = switch mode {
        case .pruneRemoved: removedPageIDs
        case .pruneAdded: addedPageIDs
        }
        return Set(prunablePageIDs)
    }
}

// MARK: - Decoding

extension DatastoreIndexManifest {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,38 @@ import Foundation

typealias DatastoreRootIdentifier = DatedIdentifier<DiskPersistence<ReadOnly>.Datastore.RootObject>

/// A reference to a datastore root object, scoped to the datastore it belongs to.
struct DatastoreRootReference: Codable, Hashable {
    /// The identifier of the datastore the root belongs to. `nil` when decoded from data written prior to version 0.4.
    var datastoreID: DatastoreIdentifier?
    
    /// The identifier of the root object itself.
    var datastoreRootID: DatastoreRootIdentifier
    
    init(datastoreID: DatastoreIdentifier, datastoreRootID: DatastoreRootIdentifier) {
        self.datastoreID = datastoreID
        self.datastoreRootID = datastoreRootID
    }
    
    init(from decoder: any Decoder) throws {
        /// Attempt to decode the full keyed object first, falling back to the bare single value that was written prior to version 0.4 (2024-10-11).
        do {
            let keyedContainer: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
            datastoreID = try keyedContainer.decodeIfPresent(DatastoreIdentifier.self, forKey: .datastoreID)
            datastoreRootID = try keyedContainer.decode(DatastoreRootIdentifier.self, forKey: .datastoreRootID)
        } catch {
            datastoreID = nil
            datastoreRootID = try decoder.singleValueContainer().decode(DatastoreRootIdentifier.self)
        }
    }
}

extension DiskPersistence.Datastore {
actor RootObject: Identifiable {
let datastore: DiskPersistence<AccessMode>.Datastore

let id: DatastoreRootIdentifier

nonisolated var referenceID: DatastoreRootReference {
DatastoreRootReference(datastoreID: datastore.id, datastoreRootID: id)
}

var _rootObject: DatastoreRootManifest?

var isPersisted: Bool
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,3 +101,25 @@ extension DatastoreRootManifest {
}
}
}

extension DatastoreRootManifest {
    /// The identifiers of the indexes that should be pruned for the given prune mode.
    func indexesToPrune(for mode: SnapshotPruneMode) -> Set<IndexID> {
        switch mode {
        case .pruneRemoved: return removedIndexes
        case .pruneAdded: return addedIndexes
        }
    }
    
    /// The identifiers of the index manifests that should be pruned for the given prune mode and options.
    func indexManifestsToPrune(
        for mode: SnapshotPruneMode,
        options: SnapshotPruneOptions
    ) -> Set<IndexManifestID> {
        switch (options, mode) {
        case (.pruneAndDelete, .pruneRemoved): return removedIndexManifests
        case (.pruneAndDelete, .pruneAdded): return addedIndexManifests
        /// Flip the results when we aren't deleting, but only when removing from the bottom end.
        case (.pruneOnly, .pruneRemoved): return addedIndexManifests
        case (.pruneOnly, .pruneAdded): return []
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,109 @@ extension DiskPersistence.Datastore {
}
}

/// Prune the persisted assets referenced by the root object with the given identifier, optionally deleting the root object itself once its dependents are cleaned up.
/// - Parameters:
///   - identifier: The identifier of the root object to prune.
///   - mode: Which edge of the history is being pruned — removed entries from the oldest edge, or added entries from the newest edge.
///   - shouldDelete: Whether the root object's own file should be deleted after its assets are pruned.
/// - Throws: Re-throws errors from loading the root object or its manifests; individual file deletions are best-effort.
func pruneRootObject(with identifier: RootObject.ID, mode: SnapshotPruneMode, shouldDelete: Bool) async throws {
    let fileManager = FileManager()
    let rootObject = try loadRootObject(for: identifier, shouldCache: false)
    
    /// Collect the indexes and related manifests we'll be deleting.
    /// - For indexes, only collect the ones we'll be deleting since the ones we are keeping won't be making references to other deletable assets.
    /// - For the manifests, we'll be deleting the entries that are being removed (relative to the direction we are removing from, so the removed ones from the oldest edge, and the added ones from the newest edge, as determined by the caller), while we'll be checking for pages to remove from entries that have just been added, but only when removing from the oldest edge. We only do this for the oldest edge because pages that have been "removed" from the newest edge are actually being _restored_ and not replaced, which maintains symmetry in a non-obvious way.
    let indexesToPruneAndDelete = rootObject.indexesToPrune(for: mode)
    let indexManifestsToPruneAndDelete = rootObject.indexManifestsToPrune(for: mode, options: .pruneAndDelete)
    let indexManifestsToPrune = rootObject.indexManifestsToPrune(for: mode, options: .pruneOnly)
    
    /// Delete the index manifests and pages we know to be removed.
    for indexManifestID in indexManifestsToPruneAndDelete {
        let indexID = Index.ID(indexManifestID)
        /// Untrack the index whether or not we end up touching its files below.
        defer {
            trackedIndexes.removeValue(forKey: indexID)
            loadedIndexes.remove(indexID)
        }
        /// Skip any manifests for indexes being deleted, since we'll just unlink the whole directory in that case.
        guard !indexesToPruneAndDelete.contains(indexID.indexID) else { continue }
        
        let manifestURL = manifestURL(for: indexID)
        /// If the manifest no longer exists, there is nothing more to do for it.
        guard try await removePrunablePages(fromManifestAt: manifestURL, indexID: indexID, mode: mode, fileManager: fileManager)
        else { continue }
        
        /// This manifest is itself being removed, so delete its file too.
        try? fileManager.removeItem(at: manifestURL)
    }
    
    /// Prune the index manifests that were just added, as they themselves refer to other deleted pages. The manifest files themselves are kept in this pass.
    for indexManifestID in indexManifestsToPrune {
        let indexID = Index.ID(indexManifestID)
        /// Skip any manifests for indexes being deleted, since we'll just unlink the whole directory in that case.
        guard !indexesToPruneAndDelete.contains(indexID.indexID) else { continue }
        
        try await removePrunablePages(fromManifestAt: manifestURL(for: indexID), indexID: indexID, mode: mode, fileManager: fileManager)
    }
    
    /// Delete any indexes in their entirety.
    for indexID in indexesToPruneAndDelete {
        try? fileManager.removeItem(at: indexURL(for: indexID))
    }
    
    /// If we are deleting the root object itself, do so at the very end as everything else would have been cleaned up.
    if shouldDelete {
        trackedRootObjects.removeValue(forKey: identifier)
        loadedRootObjects.remove(identifier)
        
        let rootURL = rootURL(for: rootObject.id)
        try? fileManager.removeItem(at: rootURL)
        try? fileManager.removeDirectoryIfEmpty(url: rootURL.deletingLastPathComponent(), recursivelyRemoveParents: true)
    }
}

/// Load the index manifest at the given URL and delete every page it marks as prunable for the given mode, untracking each page as it goes.
///
/// Deletions are best-effort: missing pages and directories are ignored.
/// - Parameters:
///   - manifestURL: The on-disk location of the manifest to inspect.
///   - indexID: The identifier of the index the manifest belongs to.
///   - mode: The prune mode used to select which page IDs are prunable.
///   - fileManager: The file manager to perform deletions with.
/// - Returns: `true` if the manifest existed and was processed, `false` if it was already gone.
/// - Throws: Re-throws any manifest-loading error other than the file being missing.
@discardableResult
private func removePrunablePages(
    fromManifestAt manifestURL: URL,
    indexID: Index.ID,
    mode: SnapshotPruneMode,
    fileManager: FileManager
) async throws -> Bool {
    let manifest: DatastoreIndexManifest?
    do {
        manifest = try await DatastoreIndexManifest(contentsOf: manifestURL, id: indexID.manifestID)
    } catch URLError.fileDoesNotExist, CocoaError.fileReadNoSuchFile, CocoaError.fileNoSuchFile, POSIXError.ENOENT {
        /// The manifest is already gone, so there are no pages to prune.
        manifest = nil
    } catch {
        print("Uncaught Manifest Error: \(error)")
        throw error
    }
    
    guard let manifest else { return false }
    
    /// Only delete the pages we know to be removed.
    let pagesToPruneAndDelete = manifest.pagesToPrune(for: mode)
    for pageID in pagesToPruneAndDelete {
        let indexedPageID = Page.ID(index: indexID, page: pageID)
        /// Untrack the page whether or not its file could be removed.
        defer {
            trackedPages.removeValue(forKey: indexedPageID.withoutManifest)
            loadedPages.remove(indexedPageID.withoutManifest)
        }
        
        let pageURL = pageURL(for: indexedPageID)
        
        try? fileManager.removeItem(at: pageURL)
        try? fileManager.removeDirectoryIfEmpty(url: pageURL.deletingLastPathComponent(), recursivelyRemoveParents: true)
    }
    return true
}

func index(for identifier: Index.ID) -> Index {
if let index = trackedIndexes[identifier]?.value {
return index
Expand Down Expand Up @@ -217,14 +320,16 @@ extension DiskPersistence.Datastore {

extension DiskPersistence.Datastore {
/// Load the root object from disk for the given identifier.
/// - Parameters:
///   - rootIdentifier: The identifier of the root object to load.
///   - shouldCache: Whether the decoded manifest should also be stored in `cachedRootObject`. Defaults to `true`.
/// - Returns: The decoded root manifest.
/// - Throws: Errors from reading the file or decoding its JSON contents.
func loadRootObject(for rootIdentifier: DatastoreRootIdentifier, shouldCache: Bool = true) throws -> DatastoreRootManifest {
    let rootObjectURL = rootURL(for: rootIdentifier)
    let manifestData = try Data(contentsOf: rootObjectURL)
    let manifest = try JSONDecoder.shared.decode(DatastoreRootManifest.self, from: manifestData)
    
    if shouldCache {
        cachedRootObject = manifest
    }
    return manifest
}

Expand Down
Loading
Loading