// NOTE(review): This file appears to have been mangled by an extraction/sanitisation step:
//   1. The original newlines were collapsed, so `//` and `///` comments now swallow the
//      remainder of their collapsed lines.
//   2. Every `<...>` span was stripped as if it were an HTML tag. This deleted generic
//      parameters (e.g. `Set` below was presumably `Set<String>`, `(Result, Bool) -> ()`
//      was `(Result<Void, Error>, Bool) -> ()`, `AnyPublisher` lost its `<T, Error>`),
//      the upper bounds of ranges (`resetBytes(in: 0..` should be `0..<keySpec.count`),
//      and in several places entire runs of code between a `<` and the next `>`.
//   3. The text ends mid-expression inside `decryptSecureExportedKey` — the file is truncated.
// The code is preserved verbatim below (only comments were touched); restore the real
// source from version control before attempting to compile. Assumed origin: Session iOS
// `Storage.swift` (GRDB + SQLCipher wrapper) — TODO confirm against the repository.
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved. // // stringlint:disable import UIKit import CryptoKit import Combine import GRDB // MARK: - Singleton public extension Singleton { static let storage: SingletonConfig = Dependencies.create( identifier: "storage", createInstance: { dependencies in Storage(using: dependencies) } ) static let scheduler: SingletonConfig = Dependencies.create( identifier: "scheduler", createInstance: { _ in AsyncValueObservationScheduler.async(onQueue: .main) } ) } // MARK: - Log.Category public extension Log.Category { static let storage: Log.Category = .create("Storage", defaultLevel: .info) } // MARK: - KeychainStorage public extension KeychainStorage.DataKey { static let dbCipherKeySpec: Self = "GRDBDatabaseCipherKeySpec" } // MARK: - Storage open class Storage { public struct CurrentlyRunningMigration: ThreadSafeType { public let identifier: TargetMigrations.Identifier public let migration: Migration.Type } public static let queuePrefix: String = "SessionDatabase" public static let dbFileName: String = "Session.sqlite" private static let SQLCipherKeySpecLength: Int = 48 /// If a transaction takes longer than this duration a warning will be logged but the transaction will continue to run private static let slowTransactionThreshold: TimeInterval = 3 /// When attempting to do a write the transaction will wait this long to acquire a lock before failing private static let writeTransactionStartTimeout: TimeInterval = 5 private static var sharedDatabaseDirectoryPath: String { "\(SessionFileManager.nonInjectedAppSharedDataDirectoryPath)/database" } private static var databasePath: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)" } private static var databasePathShm: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)-shm" } private static var databasePathWal: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)-wal" } private let dependencies: Dependencies fileprivate
// NOTE(review): `dbWriter` is the underlying GRDB writer; it stays nil until
// `configureDatabase` succeeds (or a custom writer is injected for unit tests).
var dbWriter: DatabaseWriter? internal var testDbWriter: DatabaseWriter? { dbWriter } // MARK: - Migration Variables @ThreadSafeObject private var unprocessedMigrationRequirements: [MigrationRequirement] = MigrationRequirement.allCases @ThreadSafeObject private var migrationProgressUpdater: ((String, CGFloat) -> ())? @ThreadSafeObject private var migrationRequirementProcesser: ((Database, MigrationRequirement) -> ())? @ThreadSafe private var internalCurrentlyRunningMigration: CurrentlyRunningMigration? = nil @ThreadSafe private var migrationsCompleted: Bool = false public var hasCompletedMigrations: Bool { migrationsCompleted } public var currentlyRunningMigration: CurrentlyRunningMigration? { internalCurrentlyRunningMigration } // MARK: - Database State Variables private var startupError: Error? public private(set) var isValid: Bool = false public private(set) var isSuspended: Bool = false public var isDatabasePasswordAccessible: Bool { ((try? getDatabaseCipherKeySpec()) != nil) } /// This property gets set the first time we successfully read from the database public private(set) var hasSuccessfullyRead: Bool = false /// This property gets set the first time we successfully write to the database public private(set) var hasSuccessfullyWritten: Bool = false // MARK: - Initialization public init(customWriter: DatabaseWriter? = nil, using dependencies: Dependencies) { self.dependencies = dependencies configureDatabase(customWriter: customWriter) } public init( testAccessTo databasePath: String, encryptedKeyPath: String, encryptedKeyPassword: String, using dependencies: Dependencies ) throws { self.dependencies = dependencies try testAccess( databasePath: databasePath, encryptedKeyPath: encryptedKeyPath, encryptedKeyPassword: encryptedKeyPassword ) } private func configureDatabase(customWriter: DatabaseWriter?
// NOTE(review): `configureDatabase` creates the database directory, loads/generates the
// SQLCipher key spec, then opens the pool. Part of its body (between `0..` and `Set {`
// on the next-but-one line) was eaten by the `<...>` stripping described at the top.
= nil) { // Create the database directory if needed and ensure its protection level is set before attempting to // create the database KeySpec or the database itself try? dependencies[singleton: .fileManager].ensureDirectoryExists(at: Storage.sharedDatabaseDirectoryPath) try? dependencies[singleton: .fileManager].protectFileOrFolder(at: Storage.sharedDatabaseDirectoryPath) // If a custom writer was provided then use that (for unit testing) guard customWriter == nil else { dbWriter = customWriter isValid = true return } /// Generate the database KeySpec if needed (this MUST be done before we try to access the database as a different thread /// might attempt to access the database before the key is successfully created) /// /// We reset the bytes immediately after generation to ensure the database key doesn't hang around in memory unintentionally /// /// **Note:** If we fail to get/generate the keySpec then don't bother continuing to setup the Database as it'll just be invalid, /// in this case the App/Extensions will have logic that checks the `isValid` flag of the database do { var tmpKeySpec: Data = try getOrGenerateDatabaseKeySpec() tmpKeySpec.resetBytes(in: 0.. Set { let migrator: DatabaseMigrator = DatabaseMigrator() return (try?
migrator.appliedIdentifiers(db)) .defaulting(to: []) } public static func sortedMigrationInfo(migrationTargets: [MigratableTarget.Type]) -> [KeyedMigration] { typealias MigrationInfo = (identifier: TargetMigrations.Identifier, migrations: TargetMigrations.MigrationSet) return migrationTargets .map { target -> TargetMigrations in target.migrations() } .sorted() .reduce(into: [[MigrationInfo]]()) { result, next in next.migrations.enumerated().forEach { index, migrationSet in if result.count <= index { result.append([]) } result[index] = (result[index] + [(next.identifier, migrationSet)]) } } .reduce(into: []) { result, next in next.forEach { identifier, migrations in result.append(contentsOf: migrations.map { (identifier.key(with: $0), identifier, $0) }) } } } public func perform( migrationTargets: [MigratableTarget.Type], async: Bool = true, onProgressUpdate: ((CGFloat, TimeInterval) -> ())?, onMigrationRequirement: @escaping (Database, MigrationRequirement) -> (), onComplete: @escaping (Result, Bool) -> () ) { perform( sortedMigrations: Storage.sortedMigrationInfo(migrationTargets: migrationTargets), async: async, onProgressUpdate: onProgressUpdate, onMigrationRequirement: onMigrationRequirement, onComplete: onComplete ) } internal func perform( sortedMigrations: [KeyedMigration], async: Bool, onProgressUpdate: ((CGFloat, TimeInterval) -> ())?, onMigrationRequirement: @escaping (Database, MigrationRequirement) -> (), onComplete: @escaping (Result, Bool) -> () ) { guard isValid, let dbWriter: DatabaseWriter = dbWriter else { let error: Error = (startupError ??
// NOTE(review): typo in the runtime log string on the next line — "Statup" should be
// "Startup". Left unchanged here because it is behavior-affecting text, not a comment.
StorageError.startupFailed) Log.error(.storage, "Statup failed with error: \(error)") onComplete(.failure(error), false) return } // Setup and run any required migrations var migrator: DatabaseMigrator = DatabaseMigrator() sortedMigrations.forEach { _, identifier, migration in migrator.registerMigration( self, targetIdentifier: identifier, migration: migration, using: dependencies ) } // Determine which migrations need to be performed and gather the relevant settings needed to // inform the app of progress/states let completedMigrations: [String] = (try? dbWriter.read { db in try migrator.completedMigrations(db) }) .defaulting(to: []) let unperformedMigrations: [KeyedMigration] = sortedMigrations .reduce(into: []) { result, next in guard !completedMigrations.contains(next.key) else { return } result.append(next) } let migrationToDurationMap: [String: TimeInterval] = unperformedMigrations .reduce(into: [:]) { result, next in result[next.key] = next.migration.minExpectedRunDuration } let unperformedMigrationDurations: [TimeInterval] = unperformedMigrations .map { _, _, migration in migration.minExpectedRunDuration } let totalMinExpectedDuration: TimeInterval = migrationToDurationMap.values.reduce(0, +) let needsConfigSync: Bool = unperformedMigrations .contains(where: { _, _, migration in migration.needsConfigSync }) self._migrationProgressUpdater.set(to: { targetKey, progress in guard let migrationIndex: Int = unperformedMigrations.firstIndex(where: { key, _, _ in key == targetKey }) else { return } let completedExpectedDuration: TimeInterval = ( (migrationIndex > 0 ? unperformedMigrationDurations[0..)
-> () = { [weak self, migrator, dbWriter] result in // Make sure to transition the progress updater to 100% for the final migration (just // in case the migration itself didn't update to 100% itself) if let lastMigrationKey: String = unperformedMigrations.last?.key { self?.migrationProgressUpdater?(lastMigrationKey, 1) } // Process any unprocessed requirements which need to be processed before completion // then clear out the state let requirementProcessor: ((Database, MigrationRequirement) -> ())? = self?.migrationRequirementProcesser let remainingMigrationRequirements: [MigrationRequirement] = (self?.unprocessedMigrationRequirements .filter { $0.shouldProcessAtCompletionIfNotRequired }) .defaulting(to: []) self?.migrationsCompleted = true self?._migrationProgressUpdater.set(to: nil) self?._migrationRequirementProcesser.set(to: nil) // Process any remaining migration requirements if !remainingMigrationRequirements.isEmpty && requirementProcessor != nil { self?.write { db in remainingMigrationRequirements.forEach { requirementProcessor?(db, $0) } } } // Reset in case there is a requirement on a migration which runs when returning from // the background self?._unprocessedMigrationRequirements.set(to: MigrationRequirement.allCases) // Don't log anything in the case of a 'success' or if the database is suspended (the // latter will happen if the user happens to return to the background too quickly on // launch so is unnecessarily alarming, it also gets caught and logged separately by // the 'write' functions anyway) switch result { case .success: break case .failure(DatabaseError.SQLITE_ABORT): break case .failure(let error): let completedMigrations: [String] = (try?
dbWriter .read { db in try migrator.completedMigrations(db) }) .defaulting(to: []) let failedMigrationName: String = migrator.migrations .filter { !completedMigrations.contains($0) } .first .defaulting(to: "Unknown") Log.critical(.migration, "Migration '\(failedMigrationName)' failed with error: \(error)") } onComplete(result, needsConfigSync) } // if there aren't any migrations to run then just complete immediately (this way the migrator // doesn't try to execute on the DBWrite thread so returning from the background can't get blocked // due to some weird endless process running) guard !unperformedMigrations.isEmpty else { migrationCompleted(.success(())) return } // If we have an unperformed migration then trigger the progress updater immediately if let firstMigrationKey: String = unperformedMigrations.first?.key { self.migrationProgressUpdater?(firstMigrationKey, 0) } // Note: The non-async migration should only be used for unit tests guard async else { return migrationCompleted(Result(catching: { try migrator.migrate(dbWriter) })) } migrator.asyncMigrate(dbWriter) { [dependencies] result in let finalResult: Result = { switch result { case .failure(let error): return .failure(error) case .success: return .success(()) } }() // Note: We need to dispatch this after a small 0.01 delay to prevent any potential // re-entrancy issues since the 'asyncMigrate' returns a result containing a DB instance // within a transaction DispatchQueue.global(qos: .userInitiated).asyncAfter(deadline: .now() + 0.01, using: dependencies) { migrationCompleted(finalResult) } } } public func willStartMigration( _ db: Database, _ migration: Migration.Type, _ identifier: TargetMigrations.Identifier ) { internalCurrentlyRunningMigration = CurrentlyRunningMigration( identifier: identifier, migration: migration ) guard let largestMigrationRequirement: MigrationRequirement = migration.requirements.max() else { return } // Get all requirements that are earlier than the largest requirement on the
migration and find // any that haven't yet been run (we need to run them in order as later requirements might be // dependent on earlier ones and earlier requirements might not be included in the migration) let requirementsToProcess: [MigrationRequirement] = MigrationRequirement.allCases .filter { $0 <= largestMigrationRequirement } .asSet() .intersection(unprocessedMigrationRequirements.asSet()) .sorted() // No need to do anything if there are no unprocessed requirements guard !requirementsToProcess.isEmpty else { return } // Process all of the requirements for this migration requirementsToProcess.forEach { migrationRequirementProcesser?(db, $0) } // Remove any processed requirements from the list (don't want to process them multiple times) _unprocessedMigrationRequirements.performUpdate { Array($0.asSet().subtracting(requirementsToProcess.asSet())) } } public func didCompleteMigration() { internalCurrentlyRunningMigration = nil } public static func update( progress: CGFloat, for migration: Migration.Type, in target: TargetMigrations.Identifier, using dependencies: Dependencies ) { // In test builds ignore any migration progress updates (we run in a custom database writer anyway) guard !SNUtilitiesKit.isRunningTests else { return } dependencies[singleton: .storage].migrationProgressUpdater?(target.key(with: migration), progress) } // MARK: - Security private func getDatabaseCipherKeySpec() throws -> Data { try dependencies[singleton: .keychain].migrateLegacyKeyIfNeeded( legacyKey: "GRDBDatabaseCipherKeySpec", legacyService: "TSKeyChainService", toKey: .dbCipherKeySpec ) return try dependencies[singleton: .keychain].data(forKey: .dbCipherKeySpec) } private func getOrGenerateDatabaseKeySpec() throws -> Data { do { var keySpec: Data = try getDatabaseCipherKeySpec() defer { keySpec.resetBytes(in: 0..(_ error: Error, isWrite: Bool) -> T?
{ logIfNeeded(error, isWrite: isWrite) return nil } static func logIfNeeded(_ error: Error, isWrite: Bool) -> AnyPublisher { logIfNeeded(error, isWrite: isWrite) return Fail(error: error).eraseToAnyPublisher() } } private static func perform( info: CallInfo, updates: @escaping (Database) throws -> T ) -> (Database) throws -> T { return { db in guard info.storage?.isSuspended == false else { throw StorageError.databaseSuspended } let timer: TransactionTimer = TransactionTimer.start(duration: Storage.slowTransactionThreshold, info: info) defer { timer.stop() } // Get the result let result: T = try updates(db) // Update the state flags switch info.isWrite { case true: info.storage?.hasSuccessfullyWritten = true case false: info.storage?.hasSuccessfullyRead = true } return result } } // MARK: - Functions @discardableResult public func write( fileName: String = #file, functionName: String = #function, lineNumber: Int = #line, updates: @escaping (Database) throws -> T? ) -> T? { switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true) case .valid(let dbWriter): let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self) do { return try dbWriter.write(Storage.perform(info: info, updates: updates)) } catch { return StorageState.logIfNeeded(error, isWrite: true) } } } open func writeAsync( fileName: String = #file, functionName: String = #function, lineNumber: Int = #line, updates: @escaping (Database) throws -> T, completion: @escaping (Database, Swift.Result) throws -> Void = { _, _ in } ) { switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true) case .valid(let dbWriter): let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self) dbWriter.asyncWrite( Storage.perform(info: info, updates: updates), completion: { db, result in switch result { case .failure(let error): StorageState.logIfNeeded(error, isWrite: true) default: break } try?
completion(db, result) } ) } } open func writePublisher( fileName: String = #file, functionName: String = #function, lineNumber: Int = #line, updates: @escaping (Database) throws -> T ) -> AnyPublisher { switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true) case .valid: /// **Note:** GRDB does have a `writePublisher` method but it appears to asynchronously trigger /// both the `output` and `complete` closures at the same time which causes a lot of unexpected /// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code /// for more information see https://github.com/groue/GRDB.swift/issues/1334) /// /// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduler /// which behaves in a much more expected way than the GRDB `writePublisher` does let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self) return Deferred { Future { [weak self] resolver in /// The `StorageState` may have changed between the creation of the publisher and it actually /// being executed so we need to check again switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true) case .valid(let dbWriter): do { resolver(Result.success(try dbWriter.write(Storage.perform(info: info, updates: updates)))) } catch { StorageState.logIfNeeded(error, isWrite: true) resolver(Result.failure(error)) } } } }.eraseToAnyPublisher() } } @discardableResult public func read( fileName: String = #file, functionName: String = #function, lineNumber: Int = #line, _ value: @escaping (Database) throws -> T? ) -> T?
{ switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false) case .valid(let dbWriter): let info: CallInfo = CallInfo(fileName, functionName, lineNumber, false, self) do { return try dbWriter.read(Storage.perform(info: info, updates: value)) } catch { return StorageState.logIfNeeded(error, isWrite: false) } } } open func readPublisher( fileName: String = #file, functionName: String = #function, lineNumber: Int = #line, value: @escaping (Database) throws -> T ) -> AnyPublisher { switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false) case .valid: /// **Note:** GRDB does have a `readPublisher` method but it appears to asynchronously trigger /// both the `output` and `complete` closures at the same time which causes a lot of unexpected /// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code /// for more information see https://github.com/groue/GRDB.swift/issues/1334) /// /// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduler /// which behaves in a much more expected way than the GRDB `readPublisher` does let info: CallInfo = CallInfo(fileName, functionName, lineNumber, false, self) return Deferred { Future { [weak self] resolver in /// The `StorageState` may have changed between the creation of the publisher and it actually /// being executed so we need to check again switch StorageState(self) { case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false) case .valid(let dbWriter): do { resolver(Result.success(try dbWriter.read(Storage.perform(info: info, updates: value)))) } catch { StorageState.logIfNeeded(error, isWrite: false) resolver(Result.failure(error)) } } } }.eraseToAnyPublisher() } } /// Refer to the `ValueObservation.start` method for full documentation /// /// - parameter observation: The observation to start /// - parameter
scheduler: A Scheduler. By default, fresh values are /// dispatched asynchronously on the main queue. /// - parameter onError: A closure that is provided eventual errors that /// happen during observation /// - parameter onChange: A closure that is provided fresh values /// - returns: a DatabaseCancellable public func start( _ observation: ValueObservation, scheduling scheduler: ValueObservationScheduler = .async(onQueue: .main), onError: @escaping (Error) -> Void, onChange: @escaping (Reducer.Value) -> Void ) -> DatabaseCancellable { guard isValid, let dbWriter: DatabaseWriter = dbWriter else { onError(StorageError.databaseInvalid) return AnyDatabaseCancellable(cancel: {}) } return observation.start( in: dbWriter, scheduling: scheduler, onError: onError, onChange: onChange ) } public func addObserver(_ observer: TransactionObserver?) { guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return } guard let observer: TransactionObserver = observer else { return } // Note: This actually triggers a write to the database so can be blocked by other // writes, since it's usually called on the main thread when creating a view controller // this can result in the UI hanging - to avoid this we dispatch (and hope there isn't // negative impact) DispatchQueue.global(qos: .default).async { dbWriter.add(transactionObserver: observer) } } public func removeObserver(_ observer: TransactionObserver?)
{ guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return } guard let observer: TransactionObserver = observer else { return } // Note: This actually triggers a write to the database so can be blocked by other // writes, since it's usually called on the main thread when creating a view controller // this can result in the UI hanging - to avoid this we dispatch (and hope there isn't // negative impact) DispatchQueue.global(qos: .default).async { dbWriter.remove(transactionObserver: observer) } } } // MARK: - Combine Extensions public extension ValueObservation { func publisher( in storage: Storage, scheduling scheduler: ValueObservationScheduler ) -> AnyPublisher where Reducer: ValueReducer { guard storage.isValid, let dbWriter: DatabaseWriter = storage.dbWriter else { return Fail(error: StorageError.databaseInvalid).eraseToAnyPublisher() } return self.publisher(in: dbWriter, scheduling: scheduler) .eraseToAnyPublisher() } } public extension Publisher where Failure == Error { func flatMapStorageWritePublisher(using dependencies: Dependencies, updates: @escaping (Database, Output) throws -> T) -> AnyPublisher { return self.flatMap { output -> AnyPublisher in dependencies[singleton: .storage].writePublisher(updates: { db in try updates(db, output) }) }.eraseToAnyPublisher() } func flatMapStorageReadPublisher(using dependencies: Dependencies, value: @escaping (Database, Output) throws -> T) -> AnyPublisher { return self.flatMap { output -> AnyPublisher in dependencies[singleton: .storage].readPublisher(value: { db in try value(db, output) }) }.eraseToAnyPublisher() } } // MARK: - CallInfo private extension Storage { class CallInfo { let file: String let function: String let line: Int let isWrite: Bool weak var storage: Storage? var callInfo: String { let fileInfo: String = (file.components(separatedBy: "/").last.map { "\($0):\(line) - " } ??
"") return "\(fileInfo)\(function)" } init( _ file: String, _ function: String, _ line: Int, _ isWrite: Bool, _ storage: Storage? ) { self.file = file self.function = function self.line = line self.isWrite = isWrite self.storage = storage } } } // MARK: - TransactionTimer private extension Storage { private static let timerQueue = DispatchQueue(label: "\(Storage.queuePrefix)-.transactionTimer", qos: .background) class TransactionTimer { private let info: Storage.CallInfo private let start: CFTimeInterval = CACurrentMediaTime() private var timer: DispatchSourceTimer? = DispatchSource.makeTimerSource(queue: Storage.timerQueue) private var wasSlowTransaction: Bool = false private init(info: Storage.CallInfo) { self.info = info } static func start(duration: TimeInterval, info: Storage.CallInfo) -> TransactionTimer { let result: TransactionTimer = TransactionTimer(info: info) result.timer?.schedule(deadline: .now() + .seconds(Int(duration)), repeating: .infinity) // Infinity to fire once result.timer?.setEventHandler { [weak result] in result?.timer?.cancel() result?.timer = nil let action: String = (info.isWrite ? "write" : "read") Log.warn(.storage, "Slow \(action) taking longer than \(Storage.slowTransactionThreshold, format: ".2", omitZeroDecimal: true)s - [ \(info.callInfo) ]") result?.wasSlowTransaction = true } result.timer?.resume() return result } func stop() { timer?.cancel() timer = nil guard wasSlowTransaction else { return } let end: CFTimeInterval = CACurrentMediaTime() let action: String = (info.isWrite ?
"write" : "read") Log.warn(.storage, "Slow \(action) completed after \(end - start, format: ".2", omitZeroDecimal: true)s - [ \(info.callInfo) ]") } } } // MARK: - Debug Convenience public extension Storage { static let encKeyFilename: String = "key.enc" func testAccess( databasePath: String, encryptedKeyPath: String, encryptedKeyPassword: String ) throws { /// First we need to ensure we can decrypt the encrypted key file do { var tmpKeySpec: Data = try decryptSecureExportedKey( path: encryptedKeyPath, password: encryptedKeyPassword ) tmpKeySpec.resetBytes(in: 0.. String { var keySpec: Data = try getOrGenerateDatabaseKeySpec() defer { keySpec.resetBytes(in: 0.. Data { let encKeyBase64: String = try String(contentsOf: URL(fileURLWithPath: path), encoding: .utf8) guard var passwordData: Data = password.data(using: .utf8), var encKeyData: Data = Data(base64Encoded: encKeyBase64) else { throw StorageError.generic } defer { // Reset content immediately after use passwordData.resetBytes(in: 0..
// NOTE(review): the visible text ends mid-expression above — the remainder of
// `decryptSecureExportedKey` (and possibly more of the file) is truncated.