Fixed a couple of migration issues and minor tweaks

Added the ability to define requirements for migrations (in case some data or state needs to be loaded for a migration to be able to be performed correctly)
Fixed a bug where the migration would throw because the user config might not have loaded its state yet
Fixed a bug where the migration would throw if the user didn't exist yet
Commented out some logic in the ConfigSyncJob that could be problematic
pull/941/head
Morgan Pretty 2 years ago
parent 63be502434
commit c293bbca3a

@ -32,7 +32,6 @@
34D1F0521F7E8EA30066283D /* GiphyDownloader.swift in Sources */ = {isa = PBXBuildFile; fileRef = 34D1F0511F7E8EA30066283D /* GiphyDownloader.swift */; };
34D99CE4217509C2000AFB39 /* AppEnvironment.swift in Sources */ = {isa = PBXBuildFile; fileRef = 34D99CE3217509C1000AFB39 /* AppEnvironment.swift */; };
34F308A21ECB469700BB7697 /* OWSBezierPathView.m in Sources */ = {isa = PBXBuildFile; fileRef = 34F308A11ECB469700BB7697 /* OWSBezierPathView.m */; };
3B59D92C6C15D82844A6BF16 /* BuildFile in Frameworks */ = {isa = PBXBuildFile; };
4503F1BE20470A5B00CEE724 /* classic-quiet.aifc in Resources */ = {isa = PBXBuildFile; fileRef = 4503F1BB20470A5B00CEE724 /* classic-quiet.aifc */; };
4503F1BF20470A5B00CEE724 /* classic.aifc in Resources */ = {isa = PBXBuildFile; fileRef = 4503F1BC20470A5B00CEE724 /* classic.aifc */; };
450DF2091E0DD2C6003D14BE /* UserNotificationsAdaptee.swift in Sources */ = {isa = PBXBuildFile; fileRef = 450DF2081E0DD2C6003D14BE /* UserNotificationsAdaptee.swift */; };
@ -772,6 +771,7 @@
FDA8EAFE280E8B78002B68E5 /* FailedMessageSendsJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EAFD280E8B78002B68E5 /* FailedMessageSendsJob.swift */; };
FDA8EB00280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EAFF280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift */; };
FDA8EB10280F8238002B68E5 /* Codable+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EB0F280F8238002B68E5 /* Codable+Utilities.swift */; };
FDAED05C2A7C6CE600091B25 /* MigrationRequirement.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDAED05B2A7C6CE600091B25 /* MigrationRequirement.swift */; };
FDB4BBC72838B91E00B7C95D /* LinkPreviewError.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB4BBC62838B91E00B7C95D /* LinkPreviewError.swift */; };
FDB4BBC92839BEF000B7C95D /* ProfileManagerError.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB4BBC82839BEF000B7C95D /* ProfileManagerError.swift */; };
FDB7400B28EB99A70094D718 /* TimeInterval+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB7400A28EB99A70094D718 /* TimeInterval+Utilities.swift */; };
@ -928,10 +928,8 @@
FDFDE128282D05530098B17F /* MediaPresentationContext.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDFDE127282D05530098B17F /* MediaPresentationContext.swift */; };
FDFDE12A282D056B0098B17F /* MediaZoomAnimationController.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDFDE129282D056B0098B17F /* MediaZoomAnimationController.swift */; };
FDFF61D729F2600300F95FB0 /* Identity+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDFF61D629F2600300F95FB0 /* Identity+Utilities.swift */; };
FE2FFEF6F615EE65BA087187 /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionUtilitiesKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E36D73700ED95C005B6BA026 /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionUtilitiesKit.framework */; };
FE5FDED6D91BB4B3FA5C104D /* BuildFile in Frameworks */ = {isa = PBXBuildFile; };
FDFF9FDF2A787F57005E0628 /* JSONEncoder+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDFF9FDE2A787F57005E0628 /* JSONEncoder+Utilities.swift */; };
FE5FDED6D91BB4B3FA5C104D /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionShareExtension.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7A9C113D2086D3C8A68A371C /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionShareExtension.framework */; };
FE2FFEF6F615EE65BA087187 /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionUtilitiesKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E36D73700ED95C005B6BA026 /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionUtilitiesKit.framework */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@ -1895,6 +1893,7 @@
FDA8EAFD280E8B78002B68E5 /* FailedMessageSendsJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FailedMessageSendsJob.swift; sourceTree = "<group>"; };
FDA8EAFF280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FailedAttachmentDownloadsJob.swift; sourceTree = "<group>"; };
FDA8EB0F280F8238002B68E5 /* Codable+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Codable+Utilities.swift"; sourceTree = "<group>"; };
FDAED05B2A7C6CE600091B25 /* MigrationRequirement.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MigrationRequirement.swift; sourceTree = "<group>"; };
FDB4BBC62838B91E00B7C95D /* LinkPreviewError.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LinkPreviewError.swift; sourceTree = "<group>"; };
FDB4BBC82839BEF000B7C95D /* ProfileManagerError.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ProfileManagerError.swift; sourceTree = "<group>"; };
FDB7400A28EB99A70094D718 /* TimeInterval+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "TimeInterval+Utilities.swift"; sourceTree = "<group>"; };
@ -2066,7 +2065,6 @@
C3D90A5C25773A25002C9DF5 /* SessionUtilitiesKit.framework in Frameworks */,
C3402FE52559036600EA6424 /* SessionUIKit.framework in Frameworks */,
B8D64FCB25BA78A90029CFC0 /* SignalUtilitiesKit.framework in Frameworks */,
FE5FDED6D91BB4B3FA5C104D /* BuildFile in Frameworks */,
D843F0BF3CCF3D61C3316F32 /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionShareExtension.framework in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
@ -2199,7 +2197,6 @@
buildActionMask = 2147483647;
files = (
FDC4389227B9FFC700C60D73 /* SessionMessagingKit.framework in Frameworks */,
3B59D92C6C15D82844A6BF16 /* BuildFile in Frameworks */,
C93FE8F2981231B9D7A0D96D /* Pods_GlobalDependencies_FrameworkAndExtensionDependencies_ExtendedDependencies_SessionMessagingKit_SessionMessagingKitTests.framework in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
@ -3727,6 +3724,7 @@
FD17D7BE27F51F8200122BE0 /* ColumnExpressible.swift */,
FD17D7B727F51ECA00122BE0 /* Migration.swift */,
FD17D7B927F51F2100122BE0 /* TargetMigrations.swift */,
FDAED05B2A7C6CE600091B25 /* MigrationRequirement.swift */,
FD17D7C027F5200100122BE0 /* TypedTableDefinition.swift */,
FD37EA1028AB34B3003AE748 /* TypedTableAlteration.swift */,
FD7162DA281B6C440060647B /* TypedTableAlias.swift */,
@ -5430,8 +5428,8 @@
inputFileListPaths = (
);
inputPaths = (
"$BUILT_PRODUCTS_DIR/$INFOPLIST_PATH",
"$TARGET_BUILD_DIR/$INFOPLIST_PATH",
$BUILT_PRODUCTS_DIR/$INFOPLIST_PATH,
$TARGET_BUILD_DIR/$INFOPLIST_PATH,
);
name = "Add Commit Hash To Build Info Plist";
outputFileListPaths = (
@ -5748,6 +5746,7 @@
FD17D7B827F51ECA00122BE0 /* Migration.swift in Sources */,
FD7728982849E8110018502F /* UITableView+ReusableView.swift in Sources */,
7B0EFDEE274F598600FFAAE7 /* TimestampUtils.swift in Sources */,
FDAED05C2A7C6CE600091B25 /* MigrationRequirement.swift in Sources */,
FD52090028AF6153006098F6 /* OWSBackgroundTask.m in Sources */,
C32C5DDB256DD9FF003C73A2 /* ContentProxy.swift in Sources */,
C3A71F892558BA9F0043A11F /* Mnemonic.swift in Sources */,

@ -577,13 +577,15 @@ extension ConversationVC:
)
// Trigger disappear after read
JobRunner.upsert(
dependencies.jobRunner.upsert(
db,
job: DisappearingMessagesJob.updateNextRunIfNeeded(
db,
interaction: insertedInteraction,
startedAtMs: TimeInterval(SnodeAPI.currentOffsetTimestampMs())
)
),
canStartJob: true,
using: dependencies
)
}
.subscribe(on: DispatchQueue.global(qos: .userInitiated))

@ -9,6 +9,7 @@ enum _015_DisappearingMessagesConfiguration: Migration {
static let identifier: String = "DisappearingMessagesWithTypes"
static let needsConfigSync: Bool = false
static let minExpectedRunDuration: TimeInterval = 0.1
static var requirements: [MigrationRequirement] = [.sessionUtilStateLoaded]
static func migrate(_ db: GRDB.Database) throws {
try db.alter(table: DisappearingMessagesConfiguration.self) { t in
@ -25,12 +26,18 @@ enum _015_DisappearingMessagesConfiguration: Migration {
///
/// This is due to new disappearing messages will need some info messages to be able to be unread,
/// but we only want to count the unread message number by incoming visible messages and call messages.
try db.create(
index: "interaction_on_wasRead_and_variant",
on: Interaction.databaseTableName,
columns: [Interaction.Columns.wasRead, Interaction.Columns.variant].map { $0.name }
try db.createIndex(
on: Interaction.self,
columns: [.wasRead, .variant]
)
// If there isn't already a user account then we can just finish here (there will be no
// threads/configs to update and the configs won't be setup which would cause this to crash
guard Identity.userExists(db) else {
return Storage.update(progress: 1, for: self, in: target) // In case this is the last migration
}
// Convenience function to set the disappearing messages type per conversation
func updateDisappearingMessageType(_ db: GRDB.Database, id: String, type: DisappearingMessagesConfiguration.DisappearingMessageType) throws {
_ = try DisappearingMessagesConfiguration
.filter(DisappearingMessagesConfiguration.Columns.threadId == id)
@ -40,6 +47,7 @@ enum _015_DisappearingMessagesConfiguration: Migration {
)
}
// Process any existing disappearing message settings
var contactUpdate: [DisappearingMessagesConfiguration] = []
var legacyGroupUpdate: [DisappearingMessagesConfiguration] = []
@ -47,25 +55,26 @@ enum _015_DisappearingMessagesConfiguration: Migration {
.filter(DisappearingMessagesConfiguration.Columns.isEnabled == true)
.fetchAll(db)
.forEach { config in
if let thread = try? SessionThread.fetchOne(db, id: config.threadId) {
guard !thread.isNoteToSelf(db) else {
guard let thread: SessionThread = try? SessionThread.fetchOne(db, id: config.threadId) else { return }
guard !thread.isNoteToSelf(db) else {
try updateDisappearingMessageType(db, id: config.threadId, type: .disappearAfterSend)
return
}
switch thread.variant {
case .contact:
try updateDisappearingMessageType(db, id: config.threadId, type: .disappearAfterRead)
contactUpdate.append(config.with(type: .disappearAfterRead))
case .legacyGroup, .group:
try updateDisappearingMessageType(db, id: config.threadId, type: .disappearAfterSend)
return
}
switch thread.variant {
case .contact:
try updateDisappearingMessageType(db, id: config.threadId, type: .disappearAfterRead)
contactUpdate.append(config.with(type: .disappearAfterRead))
case .legacyGroup, .group:
try updateDisappearingMessageType(db, id: config.threadId, type: .disappearAfterSend)
legacyGroupUpdate.append(config.with(type: .disappearAfterSend))
case .community:
return
}
legacyGroupUpdate.append(config.with(type: .disappearAfterSend))
case .community: return
}
}
// Update the configs so the settings are synced
_ = try SessionUtil.updatingDisappearingConfigs(db, contactUpdate)
_ = try SessionUtil.batchUpdate(db, disappearingConfigs: legacyGroupUpdate)

@ -66,7 +66,7 @@ public struct Contact: Codable, Identifiable, Equatable, FetchableRecord, Persis
self.id = id
self.isTrusted = (
isTrusted ||
id == getUserHexEncodedPublicKey(using: dependencies) // Always trust ourselves
id == getUserHexEncodedPublicKey(db, using: dependencies) // Always trust ourselves
)
self.isApproved = isApproved
self.isBlocked = isBlocked

@ -6,7 +6,7 @@ import SessionUtil
import SessionUtilitiesKit
import SessionSnodeKit
public struct DisappearingMessagesConfiguration: Codable, Identifiable, Equatable, Hashable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public struct DisappearingMessagesConfiguration: Codable, Identifiable, Equatable, Hashable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public static var databaseTableName: String { "disappearingMessagesConfiguration" }
internal static let threadForeignKey = ForeignKey([Columns.threadId], to: [SessionThread.Columns.id])
private static let thread = belongsTo(SessionThread.self, using: threadForeignKey)
@ -276,7 +276,6 @@ extension DisappearingMessagesConfiguration {
TimeInterval(10),
at: 0
)
#else
#endif
return result
default:

@ -52,26 +52,27 @@ public enum ConfigurationSyncJob: JobExecutor {
return deferred(updatedJob ?? job, dependencies)
}
// We want to update lastReadTimestamp after the disappearing messages are updated to the network, so on
// linked devices the expiration time can be the same and avoid race condition
guard
JobRunner
.infoForCurrentlyRunningJobs(of: .expirationUpdate)
.filter({ _, info in
info.threadId == job.threadId // Exclude expiration update jobs for different threads
})
.isEmpty
else {
// Defer the job to run 'maxRunFrequency'
let updatedJob: Job? = Storage.shared.write { db in
try job
.with(nextRunTimestamp: Date().timeIntervalSince1970 + waitTimeForExpirationUpdate)
.saved(db)
}
SNLog("[ConfigurationSyncJob] For \(job.threadId ?? "UnknownId") deferred due to expiration update jobs running.")
return deferred(updatedJob ?? job)
}
// TODO: This logic needs to be fixed up (the current behaviour will not behave as expected)
// // We want to update lastReadTimestamp after the disappearing messages are updated to the network, so on
// // linked devices the expiration time can be the same and avoid race condition
// guard
// dependencies.jobRunner
// .jobInfoFor(variant: .expirationUpdate)
// .filter({ _, info in
// info.threadId == job.threadId // Exclude expiration update jobs for different threads
// })
// .isEmpty
// else {
// // Defer the job to run 'maxRunFrequency'
// let updatedJob: Job? = dependencies.storage.write(using: dependencies) { db in
// try job
// .with(nextRunTimestamp: Date().timeIntervalSince1970 + waitTimeForExpirationUpdate)
// .saved(db)
// }
//
// SNLog("[ConfigurationSyncJob] For \(job.threadId ?? "UnknownId") deferred due to expiration update jobs running.")
// return deferred(updatedJob ?? job, dependencies)
// }
// If we don't have a userKeyPair yet then there is no need to sync the configuration
// as the user doesn't exist yet (this will get triggered on the first launch of a

@ -87,32 +87,6 @@ public enum ExpirationUpdateJob: JobExecutor {
}
)
}
public static func updateExpirationIfNeeded(_ db: Database, interactionId: Int64) {
guard
let interacion: Interaction = try? Interaction.fetchOne(db, id: interactionId),
let startedAtMs: TimeInterval = interacion.expiresStartedAtMs,
let expiresInSeconds: TimeInterval = interacion.expiresInSeconds,
let serverHash: String = interacion.serverHash
else {
return
}
let threadId: String = interacion.threadId
let expirationTimestampMs: Int64 = Int64(startedAtMs + expiresInSeconds * 1000)
JobRunner.add(
db,
job: Job(
variant: .expirationUpdate,
behaviour: .runOnce,
threadId: threadId,
details: ExpirationUpdateJob.Details(
serverHashes: [ serverHash ],
expirationTimestampMs: expirationTimestampMs
)
)
)
}
}
// MARK: - ExpirationUpdateJob.Details

@ -47,8 +47,10 @@ open class Storage {
fileprivate var dbWriter: DatabaseWriter?
internal var testDbWriter: DatabaseWriter? { dbWriter }
private var unprocessedMigrationRequirements: Atomic<[MigrationRequirement]> = Atomic(MigrationRequirement.allCases)
private var migrator: DatabaseMigrator?
private var migrationProgressUpdater: Atomic<((String, CGFloat) -> ())>?
private var migrationRequirementProcesser: Atomic<(Database?, MigrationRequirement) -> ()>?
// MARK: - Initialization
@ -77,6 +79,7 @@ open class Storage {
migrationTargets: (customMigrationTargets ?? []),
async: false,
onProgressUpdate: nil,
onMigrationRequirement: { _, _ in },
onComplete: { _, _ in }
)
return
@ -143,6 +146,7 @@ open class Storage {
migrationTargets: [MigratableTarget.Type],
async: Bool = true,
onProgressUpdate: ((CGFloat, TimeInterval) -> ())?,
onMigrationRequirement: @escaping (Database?, MigrationRequirement) -> (),
onComplete: @escaping (Swift.Result<Void, Error>, Bool) -> ()
) {
guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
@ -227,13 +231,24 @@ open class Storage {
onProgressUpdate?(totalProgress, totalMinExpectedDuration)
}
})
self.migrationRequirementProcesser = Atomic(onMigrationRequirement)
// Store the logic to run when the migration completes
let migrationCompleted: (Swift.Result<Void, Error>) -> () = { [weak self] result in
// Process any unprocessed requirements which need to be processed before completion
// then clear out the state
self?.unprocessedMigrationRequirements.wrappedValue
.filter { $0.shouldProcessAtCompletionIfNotRequired }
.forEach { self?.migrationRequirementProcesser?.wrappedValue(nil, $0) }
self?.migrationsCompleted.mutate { $0 = true }
self?.migrationProgressUpdater = nil
self?.migrationRequirementProcesser = nil
SUKLegacy.clearLegacyDatabaseInstance()
// Reset in case there is a requirement on a migration which runs when returning from
// the background
self?.unprocessedMigrationRequirements.mutate { $0 = MigrationRequirement.allCases }
// Don't log anything in the case of a 'success' or if the database is suspended (the
// latter will happen if the user happens to return to the background too quickly on
// launch so is unnecessarily alarming, it also gets caught and logged separately by
@ -283,6 +298,22 @@ open class Storage {
}
}
public func willStartMigration(_ db: Database, _ migration: Migration.Type) {
let unprocessedRequirements: Set<MigrationRequirement> = migration.requirements.asSet()
.intersection(unprocessedMigrationRequirements.wrappedValue.asSet())
// No need to do anything if there are no unprocessed requirements
guard !unprocessedRequirements.isEmpty else { return }
// Process all of the requirements for this migration
unprocessedRequirements.forEach { migrationRequirementProcesser?.wrappedValue(db, $0) }
// Remove any processed requirements from the list (don't want to process them multiple times)
unprocessedMigrationRequirements.mutate {
$0 = Array($0.asSet().subtracting(migration.requirements.asSet()))
}
}
public static func update(
progress: CGFloat,
for migration: Migration.Type,

@ -8,17 +8,21 @@ public protocol Migration {
static var identifier: String { get }
static var needsConfigSync: Bool { get }
static var minExpectedRunDuration: TimeInterval { get }
static var requirements: [MigrationRequirement] { get }
static func migrate(_ db: Database) throws
}
public extension Migration {
static var requirements: [MigrationRequirement] { [] }
static func loggedMigrate(
_ storage: Storage?,
targetIdentifier: TargetMigrations.Identifier
) -> ((_ db: Database) throws -> ()) {
return { (db: Database) in
SNLogNotTests("[Migration Info] Starting \(targetIdentifier.key(with: self))")
storage?.willStartMigration(db, self)
storage?.internalCurrentlyRunningMigration.mutate { $0 = (targetIdentifier, self) }
defer { storage?.internalCurrentlyRunningMigration.mutate { $0 = nil } }

@ -0,0 +1,13 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
/// A piece of data or state which must be loaded before (or, in some cases, after) a
/// migration can run correctly.
public enum MigrationRequirement: CaseIterable {
    /// The `SessionUtil` config state has been loaded into memory.
    case sessionUtilStateLoaded

    /// Whether this requirement should still be processed when the migrations finish
    /// even if no individual migration explicitly declared it.
    var shouldProcessAtCompletionIfNotRequired: Bool {
        switch self {
            case .sessionUtilStateLoaded:
                return true
        }
    }
}

@ -6,8 +6,6 @@ public final class Features {
public static let useOnionRequests: Bool = true
public static let useTestnet: Bool = false
public static let useSharedUtilForUserConfig: Bool = true // TODO: Base this off a timestamp
// public static let useNewDisappearingMessagesConfig: Bool = Date().timeIntervalSince1970 > 1671062400 // 15/12/2022
public static let useNewDisappearingMessagesConfig: Bool = true
}

@ -73,23 +73,29 @@ public enum AppSetup {
SNUIKit.self
],
onProgressUpdate: migrationProgressChanged,
onComplete: { result, needsConfigSync in
// After the migrations have run but before the migration completion we load the
// SessionUtil state and update the 'needsConfigSync' flag based on whether the
// configs also need to be sync'ed
if Identity.userExists() {
SessionUtil.loadState(
userPublicKey: getUserHexEncodedPublicKey(),
ed25519SecretKey: Identity.fetchUserEd25519KeyPair()?.secretKey
)
onMigrationRequirement: { db, requirement in
switch requirement {
case .sessionUtilStateLoaded:
guard Identity.userExists(db) else { return }
// After the migrations have run but before the migration completion we load the
// SessionUtil state
SessionUtil.loadState(
db,
userPublicKey: getUserHexEncodedPublicKey(db),
ed25519SecretKey: Identity.fetchUserEd25519KeyPair(db)?.secretKey
)
}
},
onComplete: { result, needsConfigSync in
// Refresh the migration state for 'SessionUtil' so it's logic can start running
// correctly when called (doing this here instead of automatically via the
// `SessionUtil.userConfigsEnabled` property to avoid having to use the correct
// method when calling within a database read/write closure)
Storage.shared.read { db in SessionUtil.refreshingUserConfigsEnabled(db) }
// The 'needsConfigSync' flag should be based on whether either a migration or the
// configs need to be sync'ed
migrationsCompletion(result, (needsConfigSync || SessionUtil.needsSync))
// The 'if' is only there to prevent the "variable never read" warning from showing

Loading…
Cancel
Save