Fixed a few small issues found when testing

Fixed a couple of issues with the ConfigurationSyncJob logic
Moved the proto parsing out of the MessageReceiveJob write block (to reduce time blocking writes)
Removed difficulty from the SendMessageResponse (deprecated and removed)
pull/856/head
Morgan Pretty 3 years ago
parent af9a135c08
commit 0abb09c0cf

@ -104,10 +104,11 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
public private(set) lazy var threadData: SessionThreadViewModel = SessionThreadViewModel( public private(set) lazy var threadData: SessionThreadViewModel = SessionThreadViewModel(
threadId: self.threadId, threadId: self.threadId,
threadVariant: self.initialThreadVariant, threadVariant: self.initialThreadVariant,
threadIsNoteToSelf: (self.threadId == getUserHexEncodedPublicKey()),
currentUserIsClosedGroupMember: (self.initialThreadVariant != .closedGroup ? currentUserIsClosedGroupMember: (self.initialThreadVariant != .closedGroup ?
nil : nil :
Storage.shared.read { db in Storage.shared.read { db in
try GroupMember GroupMember
.filter(GroupMember.Columns.groupId == self.threadId) .filter(GroupMember.Columns.groupId == self.threadId)
.filter(GroupMember.Columns.profileId == getUserHexEncodedPublicKey(db)) .filter(GroupMember.Columns.profileId == getUserHexEncodedPublicKey(db))
.filter(GroupMember.Columns.role == GroupMember.Role.standard) .filter(GroupMember.Columns.role == GroupMember.Role.standard)

@ -110,10 +110,17 @@ public enum ConfigurationSyncJob: JobExecutor {
.collect() .collect()
.eraseToAnyPublisher() .eraseToAnyPublisher()
} }
.map { (responses: [HTTP.BatchResponse]) -> [SuccessfulChange] in .flatMap { (responses: [HTTP.BatchResponse]) -> AnyPublisher<[SuccessfulChange], Error> in
// We make a sequence call for this so it's possible to get fewer responses than
// expected, so if that happens, fail and re-run later
guard responses.count == pendingSwarmConfigChanges.count else {
return Fail(error: HTTPError.invalidResponse)
.eraseToAnyPublisher()
}
// Process the response data into an easy to understand form (this isn't strictly // Process the response data into an easy to understand form (this isn't strictly
// needed but the code gets convoluted without this) // needed but the code gets convoluted without this)
zip(responses, pendingSwarmConfigChanges) let successfulChanges: [SuccessfulChange] = zip(responses, pendingSwarmConfigChanges)
.compactMap { (batchResponse: HTTP.BatchResponse, pendingSwarmChange: SingleDestinationChanges) -> [SuccessfulChange]? in .compactMap { (batchResponse: HTTP.BatchResponse, pendingSwarmChange: SingleDestinationChanges) -> [SuccessfulChange]? in
let maybePublicKey: String? = { let maybePublicKey: String? = {
switch pendingSwarmChange.destination { switch pendingSwarmChange.destination {
@ -145,6 +152,7 @@ public enum ConfigurationSyncJob: JobExecutor {
guard guard
let subResponse: HTTP.BatchSubResponse<SendMessagesResponse> = (next.response as? HTTP.BatchSubResponse<SendMessagesResponse>), let subResponse: HTTP.BatchSubResponse<SendMessagesResponse> = (next.response as? HTTP.BatchSubResponse<SendMessagesResponse>),
200...299 ~= subResponse.code, 200...299 ~= subResponse.code,
!subResponse.failedToParseBody,
let sendMessageResponse: SendMessagesResponse = subResponse.body let sendMessageResponse: SendMessagesResponse = subResponse.body
else { return } else { return }
@ -162,6 +170,10 @@ public enum ConfigurationSyncJob: JobExecutor {
} }
} }
.flatMap { $0 } .flatMap { $0 }
return Just(successfulChanges)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
} }
.map { (successfulChanges: [SuccessfulChange]) -> [ConfigDump] in .map { (successfulChanges: [SuccessfulChange]) -> [ConfigDump] in
// Now that we have the successful changes, we need to mark them as pushed and // Now that we have the successful changes, we need to mark them as pushed and
@ -189,6 +201,13 @@ public enum ConfigurationSyncJob: JobExecutor {
} }
} }
.sinkUntilComplete( .sinkUntilComplete(
receiveCompletion: { result in
switch result {
case .finished: break
case .failure(let error):
failure(job, error, false)
}
},
receiveValue: { (configDumps: [ConfigDump]) in receiveValue: { (configDumps: [ConfigDump]) in
// Flag to indicate whether the job should be finished or will run again // Flag to indicate whether the job should be finished or will run again
var shouldFinishCurrentJob: Bool = false var shouldFinishCurrentJob: Bool = false
@ -209,12 +228,17 @@ public enum ConfigurationSyncJob: JobExecutor {
let existingJob: Job = try? Job let existingJob: Job = try? Job
.filter(Job.Columns.id != job.id) .filter(Job.Columns.id != job.id)
.filter(Job.Columns.variant == Job.Variant.configurationSync) .filter(Job.Columns.variant == Job.Variant.configurationSync)
.fetchOne(db), .fetchOne(db)
!JobRunner.isCurrentlyRunning(existingJob)
{ {
_ = try existingJob // If the next job isn't currently running then delay its start time
.with(nextRunTimestamp: nextRunTimestamp) // until the 'nextRunTimestamp'
.saved(db) if !JobRunner.isCurrentlyRunning(existingJob) {
_ = try existingJob
.with(nextRunTimestamp: nextRunTimestamp)
.saved(db)
}
// If there is another job then we should finish this one
shouldFinishCurrentJob = true shouldFinishCurrentJob = true
return job return job
} }
@ -302,10 +326,10 @@ public extension ConfigurationSyncJob {
@discardableResult static func createOrUpdateIfNeeded(_ db: Database) -> Job { @discardableResult static func createOrUpdateIfNeeded(_ db: Database) -> Job {
// Try to get an existing job (if there is one that's not running) // Try to get an existing job (if there is one that's not running)
if if
let existingJob: Job = try? Job let existingJobs: [Job] = try? Job
.filter(Job.Columns.variant == Job.Variant.configurationSync) .filter(Job.Columns.variant == Job.Variant.configurationSync)
.fetchOne(db), .fetchAll(db),
!JobRunner.isCurrentlyRunning(existingJob) let existingJob: Job = existingJobs.first(where: { !JobRunner.isCurrentlyRunning($0) })
{ {
return existingJob return existingJob
} }

@ -25,9 +25,24 @@ public enum MessageReceiveJob: JobExecutor {
} }
var updatedJob: Job = job var updatedJob: Job = job
var leastSevereError: Error? var lastError: Error?
let nonConfigMessages: [Details.MessageInfo] = details.messages var remainingMessagesToProcess: [Details.MessageInfo] = []
let nonConfigMessages: [(info: Details.MessageInfo, proto: SNProtoContent)] = details.messages
.filter { $0.variant != .sharedConfigMessage } .filter { $0.variant != .sharedConfigMessage }
.compactMap { messageInfo -> (info: Details.MessageInfo, proto: SNProtoContent)? in
do {
return (messageInfo, try SNProtoContent.parseData(messageInfo.serializedProtoData))
}
catch {
SNLog("Couldn't receive message due to error: \(error)")
lastError = error
// We failed to process this message but it is a retryable error
// so add it to the list to re-process
remainingMessagesToProcess.append(messageInfo)
return nil
}
}
let sharedConfigMessages: [SharedConfigMessage] = details.messages let sharedConfigMessages: [SharedConfigMessage] = details.messages
.compactMap { $0.message as? SharedConfigMessage } .compactMap { $0.message as? SharedConfigMessage }
@ -40,14 +55,12 @@ public enum MessageReceiveJob: JobExecutor {
) )
// Handle the remaining messages // Handle the remaining messages
var remainingMessagesToProcess: [Details.MessageInfo] = [] for (messageInfo, protoContent) in nonConfigMessages {
for messageInfo in nonConfigMessages {
do { do {
try MessageReceiver.handle( try MessageReceiver.handle(
db, db,
message: messageInfo.message, message: messageInfo.message,
associatedWithProto: try SNProtoContent.parseData(messageInfo.serializedProtoData), associatedWithProto: protoContent,
openGroupId: nil openGroupId: nil
) )
} }
@ -71,7 +84,7 @@ public enum MessageReceiveJob: JobExecutor {
default: default:
SNLog("Couldn't receive message due to error: \(error)") SNLog("Couldn't receive message due to error: \(error)")
leastSevereError = error lastError = error
// We failed to process this message but it is a retryable error // We failed to process this message but it is a retryable error
// so add it to the list to re-process // so add it to the list to re-process
@ -94,12 +107,12 @@ public enum MessageReceiveJob: JobExecutor {
} }
// Handle the result // Handle the result
switch leastSevereError { switch lastError {
case let error as MessageReceiverError where !error.isRetryable: case let error as MessageReceiverError where !error.isRetryable:
failure(updatedJob, error, true) failure(updatedJob, error, true)
case .some(let error): case .some(let error):
failure(updatedJob, error, false) // TODO: Confirm the 'noKeyPair' errors here aren't an issue failure(updatedJob, error, false)
case .none: case .none:
success(updatedJob, false) success(updatedJob, false)

@ -4,12 +4,10 @@ import Foundation
public class SendMessagesResponse: SnodeRecursiveResponse<SendMessagesResponse.SwarmItem> { public class SendMessagesResponse: SnodeRecursiveResponse<SendMessagesResponse.SwarmItem> {
private enum CodingKeys: String, CodingKey { private enum CodingKeys: String, CodingKey {
case difficulty
case hash case hash
case swarm case swarm
} }
public let difficulty: Int64
public let hash: String public let hash: String
// MARK: - Initialization // MARK: - Initialization
@ -17,7 +15,6 @@ public class SendMessagesResponse: SnodeRecursiveResponse<SendMessagesResponse.S
required init(from decoder: Decoder) throws { required init(from decoder: Decoder) throws {
let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self) let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
difficulty = try container.decode(Int64.self, forKey: .difficulty)
hash = try container.decode(String.self, forKey: .hash) hash = try container.decode(String.self, forKey: .hash)
try super.init(from: decoder) try super.init(from: decoder)

@ -549,7 +549,7 @@ public final class SnodeAPI {
var requests: [SnodeAPI.BatchRequest.Info] = targetedMessages var requests: [SnodeAPI.BatchRequest.Info] = targetedMessages
.map { message, namespace in .map { message, namespace in
// Check if this namespace requires authentication // Check if this namespace requires authentication
guard namespace.requiresReadAuthentication else { guard namespace.requiresWriteAuthentication else {
return BatchRequest.Info( return BatchRequest.Info(
request: SnodeRequest( request: SnodeRequest(
endpoint: .sendMessage, endpoint: .sendMessage,
@ -618,7 +618,7 @@ public final class SnodeAPI {
using: dependencies using: dependencies
) )
.eraseToAnyPublisher() .eraseToAnyPublisher()
.decoded(as: responseTypes, using: dependencies) .decoded(as: responseTypes, requireAllResults: false, using: dependencies)
.eraseToAnyPublisher() .eraseToAnyPublisher()
} }
.retry(maxRetryCount) .retry(maxRetryCount)

@ -81,6 +81,7 @@ public extension Decodable {
public extension AnyPublisher where Output == (ResponseInfoType, Data?), Failure == Error { public extension AnyPublisher where Output == (ResponseInfoType, Data?), Failure == Error {
func decoded( func decoded(
as types: HTTP.BatchResponseTypes, as types: HTTP.BatchResponseTypes,
requireAllResults: Bool = true,
using dependencies: Dependencies = Dependencies() using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<HTTP.BatchResponse, Error> { ) -> AnyPublisher<HTTP.BatchResponse, Error> {
self self
@ -101,7 +102,7 @@ public extension AnyPublisher where Output == (ResponseInfoType, Data?), Failure
case let anyArray as [Any]: case let anyArray as [Any]:
dataArray = anyArray.compactMap { try? JSONSerialization.data(withJSONObject: $0) } dataArray = anyArray.compactMap { try? JSONSerialization.data(withJSONObject: $0) }
guard dataArray.count == types.count else { guard !requireAllResults || dataArray.count == types.count else {
return Fail(error: HTTPError.parsingFailed) return Fail(error: HTTPError.parsingFailed)
.eraseToAnyPublisher() .eraseToAnyPublisher()
} }
@ -110,7 +111,10 @@ public extension AnyPublisher where Output == (ResponseInfoType, Data?), Failure
guard guard
let resultsArray: [Data] = (anyDict["results"] as? [Any])? let resultsArray: [Data] = (anyDict["results"] as? [Any])?
.compactMap({ try? JSONSerialization.data(withJSONObject: $0) }), .compactMap({ try? JSONSerialization.data(withJSONObject: $0) }),
resultsArray.count == types.count (
!requireAllResults ||
resultsArray.count == types.count
)
else { else {
return Fail(error: HTTPError.parsingFailed) return Fail(error: HTTPError.parsingFailed)
.eraseToAnyPublisher() .eraseToAnyPublisher()

Loading…
Cancel
Save