chore: merged what can be shared between the user and group sync jobs

pull/2873/head
Audric Ackermann 2 years ago
parent d9300e67a0
commit c14276200e

@@ -1,5 +1,10 @@
import { GroupPubkeyType, PubkeyType } from 'libsession_util_nodejs';
import { SnodeNamespaces, SnodeNamespacesGroup } from './namespaces';
import {
SnodeNamespaces,
SnodeNamespacesGroup,
SnodeNamespacesGroupConfig,
UserConfigNamespaces,
} from './namespaces';
export type SwarmForSubRequest = { method: 'get_swarm'; params: { pubkey: string } };
@@ -108,14 +113,24 @@ export type DeleteFromNodeWithTimestampParams = {
} & DeleteSigParameters;
export type DeleteByHashesFromNodeParams = { messages: Array<string> } & DeleteSigParameters;
export type StoreOnNodeData = {
pubkey: GroupPubkeyType | PubkeyType;
type StoreOnNodeShared = {
networkTimestamp: number;
namespace: number;
data: Uint8Array;
ttl: number;
};
type StoreOnNodeGroupConfig = StoreOnNodeShared & {
pubkey: GroupPubkeyType;
namespace: SnodeNamespacesGroupConfig;
};
type StoreOnNodeUserConfig = StoreOnNodeShared & {
pubkey: PubkeyType;
namespace: UserConfigNamespaces;
};
export type StoreOnNodeData = StoreOnNodeGroupConfig | StoreOnNodeUserConfig;
export type StoreOnNodeSubRequest = { method: 'store'; params: StoreOnNodeParams };
export type NetworkTimeSubRequest = { method: 'info'; params: object };
@@ -179,7 +194,8 @@ export type SnodeApiSubRequests =
// eslint-disable-next-line @typescript-eslint/array-type
export type NonEmptyArray<T> = [T, ...T[]];
export type NotEmptyArrayOfBatchResults = NonEmptyArray<{
export type BatchResultEntry = {
code: number;
body: Record<string, any>;
}>;
};
export type NotEmptyArrayOfBatchResults = NonEmptyArray<BatchResultEntry>;

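Side note on the `StoreOnNodeData` rework above: splitting the old single type into a `StoreOnNodeGroupConfig | StoreOnNodeUserConfig` union ties each pubkey flavour to its matching namespace family, so a group pubkey paired with a user config namespace no longer type-checks. A minimal, self-contained sketch of the idea; the literal pubkey prefixes and the user namespace values are assumptions for illustration (the 12/13/14 group config values match the unit tests further down):

// Stand-ins for the libsession_util_nodejs types (assumed prefixes: 03 = group, 05 = user).
type GroupPubkeyType = `03${string}`;
type PubkeyType = `05${string}`;
type SnodeNamespacesGroupConfig = 12 | 13 | 14; // ClosedGroupInfo / Keys / Members, as in the tests below
type UserConfigNamespaces = 2 | 3 | 4 | 5; // illustrative values only

type StoreOnNodeShared = { networkTimestamp: number; data: Uint8Array; ttl: number };
type StoreOnNodeGroupConfig = StoreOnNodeShared & {
  pubkey: GroupPubkeyType;
  namespace: SnodeNamespacesGroupConfig;
};
type StoreOnNodeUserConfig = StoreOnNodeShared & {
  pubkey: PubkeyType;
  namespace: UserConfigNamespaces;
};
type StoreOnNodeData = StoreOnNodeGroupConfig | StoreOnNodeUserConfig;

const shared = { networkTimestamp: 1700000000000, data: new Uint8Array(0), ttl: 1000 };
const groupStore: StoreOnNodeData = { ...shared, pubkey: '03aa', namespace: 13 }; // ok
// const invalid: StoreOnNodeData = { ...shared, pubkey: '03aa', namespace: 2 }; // compile error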
@@ -57,7 +57,7 @@ export type SnodeNamespacesLegacyGroup = PickEnum<
SnodeNamespaces.LegacyClosedGroup
>;
type SnodeNamespacesGroupConfig = PickEnum<
export type SnodeNamespacesGroupConfig = PickEnum<
SnodeNamespaces,
| SnodeNamespaces.ClosedGroupInfo
| SnodeNamespaces.ClosedGroupMembers
@@ -71,10 +71,7 @@ export type SnodeNamespacesGroup =
| SnodeNamespacesGroupConfig
| PickEnum<SnodeNamespaces, SnodeNamespaces.ClosedGroupMessages>;
export type SnodeNamespacesUser = PickEnum<
SnodeNamespaces,
SnodeNamespaces.UserContacts | SnodeNamespaces.UserProfile | SnodeNamespaces.Default
>;
export type SnodeNamespacesUser = PickEnum<SnodeNamespaces, SnodeNamespaces.Default>;
export type UserConfigNamespaces = PickEnum<
SnodeNamespaces,

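For readers unfamiliar with `PickEnum` (its definition is outside this diff): it narrows `SnodeNamespaces` to a whitelisted subset, which is why `SnodeNamespacesGroupConfig` can now be exported and reused by the request types above. A hedged sketch using one common formulation of `PickEnum` and assumed enum values (the 12/13/14 values agree with the tests at the bottom of this commit):

enum SnodeNamespaces {
  Default = 0,
  UserProfile = 2, // assumed value
  ClosedGroupInfo = 12,
  ClosedGroupKeys = 13,
  ClosedGroupMembers = 14,
}

// A common way to write PickEnum; the repo's own definition may differ.
type PickEnum<T, K extends T> = { [P in K]: P }[K];

type SnodeNamespacesGroupConfig = PickEnum<
  SnodeNamespaces,
  | SnodeNamespaces.ClosedGroupInfo
  | SnodeNamespaces.ClosedGroupMembers
  | SnodeNamespaces.ClosedGroupKeys
>;

const ok: SnodeNamespacesGroupConfig = SnodeNamespaces.ClosedGroupKeys; // ok
// const bad: SnodeNamespacesGroupConfig = SnodeNamespaces.UserProfile; // compile error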
@@ -45,7 +45,7 @@ async function handleGroupSharedConfigMessages(
// do the merge with our current state
await MetaGroupWrapperActions.metaMerge(groupPk, toMerge);
// save updated dumps to the DB right away
await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
await LibSessionUtil.saveDumpsToDb(groupPk);
// refresh the redux slice with the merged result
window.inboxStore.dispatch(

@@ -1,26 +1,19 @@
/* eslint-disable no-await-in-loop */
import { PubkeyType } from 'libsession_util_nodejs';
import { isArray, isEmpty, isNumber, isString } from 'lodash';
import { isArray, isEmpty, isNumber } from 'lodash';
import { v4 } from 'uuid';
import { UserUtils } from '../..';
import { ConfigDumpData } from '../../../../data/configDump/configDump';
import { ConfigurationSyncJobDone } from '../../../../shims/events';
import { isSignInByLinking } from '../../../../util/storage';
import { GenericWrapperActions } from '../../../../webworker/workers/browser/libsession_worker_interface';
import {
NotEmptyArrayOfBatchResults,
StoreOnNodeData,
} from '../../../apis/snode_api/SnodeRequestTypes';
import { StoreOnNodeData } from '../../../apis/snode_api/SnodeRequestTypes';
import { GetNetworkTime } from '../../../apis/snode_api/getNetworkTime';
import { TTL_DEFAULT } from '../../../constants';
import { ConvoHub } from '../../../conversations';
import { MessageSender } from '../../../sending/MessageSender';
import { allowOnlyOneAtATime } from '../../Promise';
import {
LibSessionUtil,
PendingChangesForUs,
UserSingleDestinationChanges,
} from '../../libsession/libsession_utils';
import { LibSessionUtil, UserSuccessfulChange } from '../../libsession/libsession_utils';
import { runners } from '../JobRunner';
import {
AddJobCheckReturn,
@@ -38,78 +31,18 @@ const defaultMaxAttempts = 2;
*/
let lastRunConfigSyncJobTimestamp: number | null = null;
type UserSuccessfulChange = {
pushed: PendingChangesForUs;
updatedHash: string;
};
/**
* This function is run once we get the results from the multiple batch-send.
*/
function resultsToSuccessfulChange(
result: NotEmptyArrayOfBatchResults | null,
request: UserSingleDestinationChanges
): Array<UserSuccessfulChange> {
const successfulChanges: Array<UserSuccessfulChange> = [];
/**
* For each batch request, we get as result
* - status code + hash of the new config message
* - status code of the delete of all messages as given by the request hashes.
*
* As it is a sequence, the delete might have failed but the new config message might still be posted.
* So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
*/
if (!result?.length) {
return successfulChanges;
}
for (let j = 0; j < result.length; j++) {
const batchResult = result[j];
const messagePostedHashes = batchResult?.body?.hash;
if (batchResult.code === 200 && isString(messagePostedHashes) && request.messages?.[j]) {
// the library keeps track of the hashes to push and pushed using the hashes now
successfulChanges.push({
updatedHash: messagePostedHashes,
pushed: request.messages?.[j],
});
}
}
return successfulChanges;
}
async function buildAndSaveDumpsToDB(
async function confirmPushedAndDump(
changes: Array<UserSuccessfulChange>,
us: string
): Promise<void> {
for (let i = 0; i < changes.length; i++) {
const change = changes[i];
const variant = LibSessionUtil.userNamespaceToVariant(change.pushed.namespace);
const needsDump = await LibSessionUtil.markAsPushed(
await GenericWrapperActions.confirmPushed(
variant,
change.pushed.seqno.toNumber(),
change.updatedHash
);
if (!needsDump) {
continue;
}
const dump = await GenericWrapperActions.dump(variant);
await ConfigDumpData.saveConfigDump({
data: dump,
publicKey: us,
variant,
});
}
}
async function saveDumpsNeededToDB(us: string) {
for (let i = 0; i < LibSessionUtil.requiredUserVariants.length; i++) {
const variant = LibSessionUtil.requiredUserVariants[i];
const needsDump = await GenericWrapperActions.needsDump(variant);
if (!needsDump) {
@@ -139,16 +72,16 @@ async function pushChangesToUserSwarmIfNeeded() {
}
// save the dumps to DB even before trying to push them, so at least we have up-to-date dumps in the DB in case of a crash, no network, etc
await saveDumpsNeededToDB(us);
const singleDestChanges = await LibSessionUtil.pendingChangesForUs();
await LibSessionUtil.saveDumpsToDb(us);
const changesToPush = await LibSessionUtil.pendingChangesForUs();
// If there are no pending changes then the job can just complete (next time something
is updated we want to try and run immediately so don't schedule another run in this case)
if (isEmpty(singleDestChanges?.messages)) {
if (isEmpty(changesToPush?.messages)) {
triggerConfSyncJobDone();
return RunJobResult.Success;
}
const msgs: Array<StoreOnNodeData> = singleDestChanges.messages.map(item => {
const msgs: Array<StoreOnNodeData> = changesToPush.messages.map(item => {
return {
namespace: item.namespace,
pubkey: us,
@@ -158,14 +91,10 @@ async function pushChangesToUserSwarmIfNeeded() {
};
});
const result = await MessageSender.sendEncryptedDataToSnode(
msgs,
us,
singleDestChanges.allOldHashes
);
const result = await MessageSender.sendEncryptedDataToSnode(msgs, us, changesToPush.allOldHashes);
const expectedReplyLength =
singleDestChanges.messages.length + (singleDestChanges.allOldHashes.size ? 1 : 0);
changesToPush.messages.length + (changesToPush.allOldHashes.size ? 1 : 0);
// we do a sequence call here. If we do not have the right expected number of results, consider it a failure
if (!isArray(result) || result.length !== expectedReplyLength) {
window.log.info(
@@ -175,14 +104,14 @@ async function pushChangesToUserSwarmIfNeeded() {
return RunJobResult.RetryJobIfPossible;
}
const changes = resultsToSuccessfulChange(result, singleDestChanges);
const changes = LibSessionUtil.batchResultsToUserSuccessfulChange(result, changesToPush);
if (isEmpty(changes)) {
return RunJobResult.RetryJobIfPossible;
}
// Now that we have the successful changes, we need to mark them as pushed and
// generate any config dumps which need to be stored
await buildAndSaveDumpsToDB(changes, us);
await confirmPushedAndDump(changes, us);
triggerConfSyncJobDone();
return RunJobResult.Success;
}

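Worth spelling out the reply-shape invariant both sync jobs now share: the swarm call is a sequence of one store per config message plus one trailing delete-by-hashes when `allOldHashes` is non-empty, hence the `expectedReplyLength` check above. A small standalone sketch of that check (the helper name is illustrative, not part of the diff):

type BatchResultEntry = { code: number; body: Record<string, any> };

// Illustrative helper: true when the swarm reply matches the sequence we sent.
function hasExpectedReplyLength(
  result: unknown,
  messageCount: number,
  allOldHashes: Set<string>
): result is Array<BatchResultEntry> {
  // one entry per stored config message + one for the delete-by-hashes call, if any
  const expectedReplyLength = messageCount + (allOldHashes.size ? 1 : 0);
  return Array.isArray(result) && result.length === expectedReplyLength;
}

// e.g. 3 config messages and a non-empty hash set => exactly 4 batch results expected
// hasExpectedReplyLength(result, 3, new Set(['oldHash']))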
@@ -1,13 +1,11 @@
/* eslint-disable no-await-in-loop */
import { GroupPubkeyType } from 'libsession_util_nodejs';
import { isArray, isEmpty, isNumber, isString } from 'lodash';
import { isArray, isEmpty, isNumber } from 'lodash';
import { UserUtils } from '../..';
import { assertUnreachable } from '../../../../types/sqlSharedTypes';
import { isSignInByLinking } from '../../../../util/storage';
import { MetaGroupWrapperActions } from '../../../../webworker/workers/browser/libsession_worker_interface';
import {
NotEmptyArrayOfBatchResults,
StoreOnNodeData,
} from '../../../apis/snode_api/SnodeRequestTypes';
import { StoreOnNodeData } from '../../../apis/snode_api/SnodeRequestTypes';
import { GetNetworkTime } from '../../../apis/snode_api/getNetworkTime';
import { SnodeNamespaces } from '../../../apis/snode_api/namespaces';
import { TTL_DEFAULT } from '../../../constants';
@@ -15,11 +13,7 @@ import { ConvoHub } from '../../../conversations';
import { MessageSender } from '../../../sending/MessageSender';
import { PubKey } from '../../../types';
import { allowOnlyOneAtATime } from '../../Promise';
import {
GroupSingleDestinationChanges,
LibSessionUtil,
PendingChangesForGroup,
} from '../../libsession/libsession_utils';
import { GroupSuccessfulChange, LibSessionUtil } from '../../libsession/libsession_utils';
import { runners } from '../JobRunner';
import {
AddJobCheckReturn,
@@ -27,7 +21,6 @@ import {
PersistedJob,
RunJobResult,
} from '../PersistedJob';
import { assertUnreachable } from '../../../../types/sqlSharedTypes';
const defaultMsBetweenRetries = 15000; // a long time between retries, to avoid running multiple jobs at the same time, when one was postponed at the same time as one already planned (5s)
const defaultMaxAttempts = 2;
@@ -38,50 +31,7 @@ const defaultMaxAttempts = 2;
*/
const lastRunConfigSyncJobTimestamps = new Map<string, number | null>();
export type GroupSuccessfulChange = {
pushed: PendingChangesForGroup;
updatedHash: string;
};
/**
* This function is run once we get the results from the multiple batch-send.
*/
function resultsToSuccessfulChange(
result: NotEmptyArrayOfBatchResults | null,
request: GroupSingleDestinationChanges
): Array<GroupSuccessfulChange> {
const successfulChanges: Array<GroupSuccessfulChange> = [];
/**
* For each batch request, we get as result
* - status code + hash of the new config message
* - status code of the delete of all messages as given by the request hashes.
*
* As it is a sequence, the delete might have failed but the new config message might still be posted.
* So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
*/
if (!result?.length) {
return successfulChanges;
}
for (let j = 0; j < result.length; j++) {
const batchResult = result[j];
const messagePostedHashes = batchResult?.body?.hash;
if (batchResult.code === 200 && isString(messagePostedHashes) && request.messages?.[j].data) {
// libsession keeps track of the hashes to push and pushed using the hashes now
successfulChanges.push({
updatedHash: messagePostedHashes,
pushed: request.messages?.[j],
});
}
}
return successfulChanges;
}
async function buildAndSaveDumpsToDB(
async function confirmPushedAndDump(
changes: Array<GroupSuccessfulChange>,
groupPk: GroupPubkeyType
): Promise<void> {
@@ -112,37 +62,37 @@ async function buildAndSaveDumpsToDB(
}
await MetaGroupWrapperActions.metaConfirmPushed(...toConfirm);
return LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
return LibSessionUtil.saveDumpsToDb(groupPk);
}
async function pushChangesToGroupSwarmIfNeeded(groupPk: GroupPubkeyType): Promise<RunJobResult> {
// save the dumps to DB even before trying to push them, so at least we have up-to-date dumps in the DB in case of a crash, no network, etc
await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
const singleDestChanges = await LibSessionUtil.pendingChangesForGroup(groupPk);
await LibSessionUtil.saveDumpsToDb(groupPk);
const changesToPush = await LibSessionUtil.pendingChangesForGroup(groupPk);
// If there are no pending changes then the job can just complete (next time something
is updated we want to try and run immediately so don't schedule another run in this case)
if (isEmpty(singleDestChanges?.messages)) {
if (isEmpty(changesToPush?.messages)) {
return RunJobResult.Success;
}
const msgs: Array<StoreOnNodeData> = singleDestChanges.messages.map(item => {
const msgs: Array<StoreOnNodeData> = changesToPush.messages.map(item => {
return {
namespace: item.namespace,
pubkey: groupPk,
networkTimestamp: GetNetworkTime.getNowWithNetworkOffset(),
ttl: TTL_DEFAULT.TTL_CONFIG,
data: item.data,
data: item.ciphertext,
};
});
const result = await MessageSender.sendEncryptedDataToSnode(
msgs,
groupPk,
singleDestChanges.allOldHashes
changesToPush.allOldHashes
);
const expectedReplyLength =
singleDestChanges.messages.length + (singleDestChanges.allOldHashes.size ? 1 : 0);
changesToPush.messages.length + (changesToPush.allOldHashes.size ? 1 : 0);
// we do a sequence call here. If we do not have the right expected number of results, consider it a failure
if (!isArray(result) || result.length !== expectedReplyLength) {
@@ -154,14 +104,14 @@ async function pushChangesToGroupSwarmIfNeeded(groupPk: GroupPubkeyType): Promis
return RunJobResult.RetryJobIfPossible;
}
const changes = GroupSync.resultsToSuccessfulChange(result, singleDestChanges);
const changes = LibSessionUtil.batchResultsToGroupSuccessfulChange(result, changesToPush);
if (isEmpty(changes)) {
return RunJobResult.RetryJobIfPossible;
}
// Now that we have the successful changes, we need to mark them as pushed and
// generate any config dumps which need to be stored
await buildAndSaveDumpsToDB(changes, groupPk);
await confirmPushedAndDump(changes, groupPk);
return RunJobResult.Success;
}
@@ -283,7 +233,6 @@ async function queueNewJobIfNeeded(groupPk: GroupPubkeyType) {
export const GroupSync = {
GroupSyncJob,
pushChangesToGroupSwarmIfNeeded,
resultsToSuccessfulChange,
queueNewJobIfNeeded: (groupPk: GroupPubkeyType) =>
allowOnlyOneAtATime(`GroupSyncJob-oneAtAtTime-${groupPk}`, () => queueNewJobIfNeeded(groupPk)),
};

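The `GroupSync` export above serializes `queueNewJobIfNeeded` per group through `allowOnlyOneAtATime` (defined in `../../Promise`, outside this diff). A minimal sketch of the single-flight semantics the call site relies on, as an assumption about that helper's behaviour rather than its real implementation:

const inFlight = new Map<string, Promise<unknown>>();

// Concurrent callers using the same key share the one in-flight promise
// instead of starting duplicate jobs; the entry is cleared once it settles.
async function allowOnlyOneAtATime<T>(key: string, job: () => Promise<T>): Promise<T> {
  const existing = inFlight.get(key);
  if (existing) {
    return existing as Promise<T>;
  }
  const running = job().finally(() => inFlight.delete(key));
  inFlight.set(key, running);
  return running;
}

// usage mirroring the export above:
// allowOnlyOneAtATime(`GroupSyncJob-oneAtAtTime-${groupPk}`, () => queueNewJobIfNeeded(groupPk));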
@@ -1,8 +1,8 @@
/* eslint-disable no-await-in-loop */
/* eslint-disable import/extensions */
/* eslint-disable import/no-unresolved */
import { GroupPubkeyType } from 'libsession_util_nodejs';
import { compact, difference, omit } from 'lodash';
import { GroupPubkeyType, PubkeyType } from 'libsession_util_nodejs';
import { compact, difference, isString, omit } from 'lodash';
import Long from 'long';
import { UserUtils } from '..';
import { ConfigDumpData } from '../../../data/configDump/configDump';
@@ -20,6 +20,10 @@ import { SnodeNamespaces, UserConfigNamespaces } from '../../apis/snode_api/name
import { ed25519Str } from '../../onions/onionPath';
import { PubKey } from '../../types';
import { ConfigurationSync } from '../job_runners/jobs/ConfigurationSyncJob';
import {
BatchResultEntry,
NotEmptyArrayOfBatchResults,
} from '../../apis/snode_api/SnodeRequestTypes';
const requiredUserVariants: Array<ConfigWrapperUser> = [
'UserConfig',
@@ -95,88 +99,85 @@ async function initializeLibSessionUtilWrappers() {
// No need to load the meta group wrapper here. We will load them once the SessionInbox is loaded with a redux action
}
export type PendingChangesForUs = {
type PendingChangesShared = {
ciphertext: Uint8Array;
};
export type PendingChangesForUs = PendingChangesShared & {
seqno: Long;
namespace: UserConfigNamespaces;
};
type PendingChangesForGroupNonKey = {
data: Uint8Array;
type PendingChangesForGroupNonKey = PendingChangesShared & {
seqno: Long;
namespace: SnodeNamespaces.ClosedGroupInfo | SnodeNamespaces.ClosedGroupMembers;
type: Extract<ConfigWrapperGroupDetailed, 'GroupInfo' | 'GroupMember'>;
};
type PendingChangesForGroupKey = {
data: Uint8Array;
ciphertext: Uint8Array;
namespace: SnodeNamespaces.ClosedGroupKeys;
type: Extract<ConfigWrapperGroupDetailed, 'GroupKeys'>;
};
export type PendingChangesForGroup = PendingChangesForGroupNonKey | PendingChangesForGroupKey;
type SingleDestinationChanges<T extends PendingChangesForGroup | PendingChangesForUs> = {
type DestinationChanges<T extends PendingChangesForGroup | PendingChangesForUs> = {
messages: Array<T>;
allOldHashes: Set<string>;
};
export type UserSingleDestinationChanges = SingleDestinationChanges<PendingChangesForUs>;
export type GroupSingleDestinationChanges = SingleDestinationChanges<PendingChangesForGroup>;
export type UserDestinationChanges = DestinationChanges<PendingChangesForUs>;
export type GroupDestinationChanges = DestinationChanges<PendingChangesForGroup>;
async function pendingChangesForUs(): Promise<UserSingleDestinationChanges> {
const us = UserUtils.getOurPubKeyStrFromCache();
const dumps = await ConfigDumpData.getAllDumpsWithoutDataFor(us);
export type UserSuccessfulChange = {
pushed: PendingChangesForUs;
updatedHash: string;
};
// Ensure we always check the required user config types for changes even if there is no dump
// data yet (to deal with first launch cases)
LibSessionUtil.requiredUserVariants.forEach(requiredVariant => {
if (!dumps.some(m => m.publicKey === us && m.variant === requiredVariant)) {
dumps.push({
publicKey: us,
variant: requiredVariant,
});
}
});
export type GroupSuccessfulChange = {
pushed: PendingChangesForGroup;
updatedHash: string;
};
const results: UserSingleDestinationChanges = { messages: [], allOldHashes: new Set() };
/**
* Fetch what needs to be pushed for all of the current user's wrappers.
*/
async function pendingChangesForUs(): Promise<UserDestinationChanges> {
const results: UserDestinationChanges = { messages: [], allOldHashes: new Set() };
const variantsNeedingPush = new Set<ConfigWrapperUser>();
const userVariants = LibSessionUtil.requiredUserVariants;
for (let index = 0; index < userVariants.length; index++) {
const variant = userVariants[index];
for (let index = 0; index < dumps.length; index++) {
const dump = dumps[index];
const variant = dump.variant;
if (!isUserConfigWrapperType(variant)) {
// this shouldn't happen for our pubkey.
continue;
}
const needsPush = await GenericWrapperActions.needsPush(variant);
if (!needsPush) {
continue;
}
variantsNeedingPush.add(variant);
const { data, seqno, hashes, namespace } = await GenericWrapperActions.push(variant);
variantsNeedingPush.add(variant);
results.messages.push({
ciphertext: data,
seqno: Long.fromNumber(seqno),
namespace,
namespace, // we only use the namespace to know to what variant it relates
});
hashes.forEach(hash => {
results.allOldHashes.add(hash);
});
hashes.forEach(hash => results.allOldHashes.add(hash)); // add all the hashes to the set
}
window.log.info(`those variants needs push: "${[...variantsNeedingPush]}"`);
window.log.info(`these user variants need a push: "${[...variantsNeedingPush]}"`);
return results;
}
// we link the namespace to the type of what each wrapper needs
async function pendingChangesForGroup(
groupPk: GroupPubkeyType
): Promise<GroupSingleDestinationChanges> {
/**
* Fetch what needs to be pushed for the specified group public key.
* @param groupPk the public key of the group to fetch the details for
* @returns an object with the list of messages to be pushed and the list of hashes whose expiry should be bumped server side
*/
async function pendingChangesForGroup(groupPk: GroupPubkeyType): Promise<GroupDestinationChanges> {
if (!PubKey.isClosedGroupV2(groupPk)) {
throw new Error(`pendingChangesForGroup only works for user or 03 group pubkeys`);
}
@@ -195,7 +196,7 @@ async function pendingChangesForGroup(
if (groupKeys) {
results.push({
type: 'GroupKeys',
data: groupKeys.data,
ciphertext: groupKeys.data,
namespace: groupKeys.namespace,
});
}
@@ -203,7 +204,7 @@ async function pendingChangesForGroup(
if (groupInfo) {
results.push({
type: 'GroupInfo',
data: groupInfo.data,
ciphertext: groupInfo.data,
seqno: Long.fromNumber(groupInfo.seqno),
namespace: groupInfo.namespace,
});
@@ -211,7 +212,7 @@ async function pendingChangesForGroup(
if (groupMember) {
results.push({
type: 'GroupMember',
data: groupMember.data,
ciphertext: groupMember.data,
seqno: Long.fromNumber(groupMember.seqno),
namespace: groupMember.namespace,
});
@@ -227,7 +228,12 @@ async function pendingChangesForGroup(
return { messages: results, allOldHashes };
}
/**
* Return the wrapperId associated with a specific namespace.
* WrapperIds are what we use in the database and in the libsession worker calls, while namespaces are what we push to.
*/
function userNamespaceToVariant(namespace: UserConfigNamespaces) {
// TODO Might be worth migrating them to use the namespaces directly?
switch (namespace) {
case SnodeNamespaces.UserProfile:
return 'UserConfig';
@@ -239,34 +245,141 @@ function userNamespaceToVariant(namespace: UserConfigNamespaces) {
return 'ConvoInfoVolatileConfig';
default:
assertUnreachable(namespace, `userNamespaceToVariant: Unsupported namespace: "${namespace}"`);
throw new Error('userNamespaceToVariant: Unsupported namespace:');
throw new Error('userNamespaceToVariant: Unsupported namespace:'); // ts is not happy without this
}
}
function resultShouldBeIncluded<T extends PendingChangesForGroup | PendingChangesForUs>(
msgPushed: T,
batchResult: BatchResultEntry
) {
const hash = batchResult.body?.hash;
if (batchResult.code === 200 && isString(hash) && msgPushed.ciphertext) {
return {
hash,
pushed: msgPushed,
};
}
return null;
}
/**
* Returns true if the config needs to be dumped afterwards
* This function is run once we get the results from the multiple batch-send for the group push.
* Note: the logic is the same as `batchResultsToUserSuccessfulChange` but I couldn't make typescript happy.
*/
async function markAsPushed(variant: ConfigWrapperUser, seqno: number, hash: string) {
await GenericWrapperActions.confirmPushed(variant, seqno, hash);
return GenericWrapperActions.needsDump(variant);
function batchResultsToGroupSuccessfulChange(
result: NotEmptyArrayOfBatchResults | null,
request: GroupDestinationChanges
): Array<GroupSuccessfulChange> {
const successfulChanges: Array<GroupSuccessfulChange> = [];
/**
* For each batch request, we get as result
* - status code + hash of the new config message
* - status code of the delete of all messages as given by the request hashes.
*
* As it is a sequence, the delete might have failed but the new config message might still be posted.
* So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
*/
if (!result?.length) {
return successfulChanges;
}
for (let j = 0; j < result.length; j++) {
const msgPushed = request.messages?.[j];
const shouldBe = resultShouldBeIncluded(msgPushed, result[j]);
if (shouldBe) {
// libsession keeps track of the hashes to push and the ones already pushed
successfulChanges.push({
updatedHash: shouldBe.hash,
pushed: shouldBe.pushed,
});
}
}
return successfulChanges;
}
/**
* If a dump is needed for that metagroup wrapper, dump it to the Database
* This function is run once we get the results from the multiple batch-send for the user push.
* Note: the logic is the same as `batchResultsToGroupSuccessfulChange` but I couldn't make typescript happy.
*/
async function saveMetaGroupDumpToDb(groupPk: GroupPubkeyType) {
const metaNeedsDump = await MetaGroupWrapperActions.needsDump(groupPk);
// save the concatenated dumps as a single entry in the DB if any of the dumps had a need for dump
if (metaNeedsDump) {
const dump = await MetaGroupWrapperActions.metaDump(groupPk);
function batchResultsToUserSuccessfulChange(
result: NotEmptyArrayOfBatchResults | null,
request: UserDestinationChanges
): Array<UserSuccessfulChange> {
const successfulChanges: Array<UserSuccessfulChange> = [];
/**
* For each batch request, we get as result
* - status code + hash of the new config message
* - status code of the delete of all messages as given by the request hashes.
*
* As it is a sequence, the delete might have failed but the new config message might still be posted.
* So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
*/
if (!result?.length) {
return successfulChanges;
}
for (let j = 0; j < result.length; j++) {
const msgPushed = request.messages?.[j];
const shouldBe = resultShouldBeIncluded(msgPushed, result[j]);
if (shouldBe) {
// libsession keeps track of the hashes to push and the ones already pushed
successfulChanges.push({
updatedHash: shouldBe.hash,
pushed: shouldBe.pushed,
});
}
}
return successfulChanges;
}
/**
* Check if the wrappers related to that pubkey need to be dumped to the DB, and if yes, do it.
*/
async function saveDumpsToDb(pubkey: PubkeyType | GroupPubkeyType) {
// first check if this relates to a group
if (PubKey.isClosedGroupV2(pubkey)) {
const metaNeedsDump = await MetaGroupWrapperActions.needsDump(pubkey);
// save the concatenated dumps as a single entry in the DB if any of the dumps had a need for dump
if (metaNeedsDump) {
const dump = await MetaGroupWrapperActions.metaDump(pubkey);
await ConfigDumpData.saveConfigDump({
data: dump,
publicKey: pubkey,
variant: `MetaGroupConfig-${pubkey}`,
});
window.log.debug(`Saved dumps for metagroup ${ed25519Str(pubkey)}`);
} else {
window.log.debug(`No need to update local dumps for metagroup ${ed25519Str(pubkey)}`);
}
return;
}
// here, we can only be called with our current user pubkey
if (pubkey !== UserUtils.getOurPubKeyStrFromCache()) {
throw new Error('saveDumpsToDb only supports groupv2 and us pubkeys');
}
for (let i = 0; i < LibSessionUtil.requiredUserVariants.length; i++) {
const variant = LibSessionUtil.requiredUserVariants[i];
const needsDump = await GenericWrapperActions.needsDump(variant);
if (!needsDump) {
continue;
}
const dump = await GenericWrapperActions.dump(variant);
await ConfigDumpData.saveConfigDump({
data: dump,
publicKey: groupPk,
variant: `MetaGroupConfig-${groupPk}`,
publicKey: pubkey,
variant,
});
window.log.debug(`Saved dumps for metagroup ${ed25519Str(groupPk)}`);
} else {
window.log.debug(`No need to update local dumps for metagroup ${ed25519Str(groupPk)}`);
}
}
@@ -276,6 +389,7 @@ export const LibSessionUtil = {
requiredUserVariants,
pendingChangesForUs,
pendingChangesForGroup,
markAsPushed,
saveMetaGroupDumpToDb,
saveDumpsToDb,
batchResultsToGroupSuccessfulChange,
batchResultsToUserSuccessfulChange,
};

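To make the new shared helper concrete, here is a hedged usage sketch of `batchResultsToGroupSuccessfulChange`, mirroring the unit tests below; the import paths are assumed to be relative to the job runner files in this commit:

import Long from 'long';
import { SnodeNamespaces } from '../../../apis/snode_api/namespaces'; // path assumed
import { NotEmptyArrayOfBatchResults } from '../../../apis/snode_api/SnodeRequestTypes'; // path assumed
import { LibSessionUtil } from '../../libsession/libsession_utils'; // path assumed

// First entry: the config message was stored (200 + hash). Second entry: the
// delete-by-hashes call failed (401), so it is skipped rather than treated as fatal.
const batchResults: NotEmptyArrayOfBatchResults = [
  { code: 200, body: { hash: 'hash1' } },
  { code: 401, body: {} },
];

const changes = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, {
  allOldHashes: new Set(['oldHash1']),
  messages: [
    {
      type: 'GroupInfo',
      ciphertext: new Uint8Array([1, 2, 3]),
      seqno: Long.fromNumber(123),
      namespace: SnodeNamespaces.ClosedGroupInfo,
    },
  ],
});
// changes => [{ updatedHash: 'hash1', pushed: <the GroupInfo message above> }]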
@@ -236,27 +236,22 @@ describe('JobRunner', () => {
expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(job.persistedData.identifier);
clock.tick(5000);
console.info('=========== awaiting first job ==========');
await runnerMulti.waitCurrentJob();
// just give some time for the runnerMulti to pick up a new job
await sleepFor(10);
expect(runnerMulti.getJobList()).to.deep.eq([]);
expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(null);
console.info('=========== awaited first job ==========');
// the first job should already be finished now
result = await runnerMulti.addJob(job2);
expect(result).to.eq('job_started');
expect(runnerMulti.getJobList()).to.deep.eq([job2.serializeJob()]);
console.info('=========== awaiting second job ==========');
// each job takes 5s to finish, so let's tick once the first one should be done
clock.tick(5010);
await runnerMulti.waitCurrentJob();
await sleepFor(10);
console.info('=========== awaited second job ==========');
expect(runnerMulti.getJobList()).to.deep.eq([]);
});

@@ -1,6 +1,6 @@
import { expect } from 'chai';
import { GroupPubkeyType } from 'libsession_util_nodejs';
import { omit, pick } from 'lodash';
import { omit } from 'lodash';
import Long from 'long';
import Sinon from 'sinon';
import { ConfigDumpData } from '../../../../../../data/configDump/configDump';
@@ -8,29 +8,27 @@ import { getSodiumNode } from '../../../../../../node/sodiumNode';
import { NotEmptyArrayOfBatchResults } from '../../../../../../session/apis/snode_api/SnodeRequestTypes';
import { GetNetworkTime } from '../../../../../../session/apis/snode_api/getNetworkTime';
import { SnodeNamespaces } from '../../../../../../session/apis/snode_api/namespaces';
import { TTL_DEFAULT } from '../../../../../../session/constants';
import { ConvoHub } from '../../../../../../session/conversations';
import { LibSodiumWrappers } from '../../../../../../session/crypto';
import { MessageSender } from '../../../../../../session/sending';
import { UserUtils } from '../../../../../../session/utils';
import { RunJobResult } from '../../../../../../session/utils/job_runners/PersistedJob';
import { GroupSync } from '../../../../../../session/utils/job_runners/jobs/GroupConfigJob';
import {
GroupDestinationChanges,
GroupSuccessfulChange,
GroupSync,
} from '../../../../../../session/utils/job_runners/jobs/GroupConfigJob';
import {
GroupSingleDestinationChanges,
LibSessionUtil,
PendingChangesForGroup,
} from '../../../../../../session/utils/libsession/libsession_utils';
import { MetaGroupWrapperActions } from '../../../../../../webworker/workers/browser/libsession_worker_interface';
import { TestUtils } from '../../../../../test-utils';
import { MessageSender } from '../../../../../../session/sending';
import { TypedStub } from '../../../../../test-utils/utils';
import { TTL_DEFAULT } from '../../../../../../session/constants';
function validInfo(sodium: LibSodiumWrappers) {
return {
type: 'GroupInfo',
data: sodium.randombytes_buf(12),
ciphertext: sodium.randombytes_buf(12),
seqno: Long.fromNumber(123),
namespace: SnodeNamespaces.ClosedGroupInfo,
timestamp: 1234,
@@ -39,7 +37,7 @@ function validInfo(sodium: LibSodiumWrappers) {
function validMembers(sodium: LibSodiumWrappers) {
return {
type: 'GroupMember',
data: sodium.randombytes_buf(12),
ciphertext: sodium.randombytes_buf(12),
seqno: Long.fromNumber(321),
namespace: SnodeNamespaces.ClosedGroupMembers,
timestamp: 4321,
@@ -49,13 +47,13 @@ function validMembers(sodium: LibSodiumWrappers) {
function validKeys(sodium: LibSodiumWrappers) {
return {
type: 'GroupKeys',
data: sodium.randombytes_buf(12),
ciphertext: sodium.randombytes_buf(12),
namespace: SnodeNamespaces.ClosedGroupKeys,
timestamp: 3333,
} as const;
}
describe('GroupSyncJob saveMetaGroupDumpToDb', () => {
describe('GroupSyncJob saveDumpsToDb', () => {
let groupPk: GroupPubkeyType;
beforeEach(async () => {});
@@ -71,7 +69,7 @@ describe('GroupSyncJob saveMetaGroupDumpToDb', () => {
Sinon.stub(MetaGroupWrapperActions, 'needsDump').resolves(false);
const metaDump = Sinon.stub(MetaGroupWrapperActions, 'metaDump').resolves(new Uint8Array());
const saveConfigDump = Sinon.stub(ConfigDumpData, 'saveConfigDump').resolves();
await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
await LibSessionUtil.saveDumpsToDb(groupPk);
expect(saveConfigDump.callCount).to.be.equal(0);
expect(metaDump.callCount).to.be.equal(0);
});
@@ -81,7 +79,7 @@
const dump = [1, 2, 3, 4, 5];
const metaDump = Sinon.stub(MetaGroupWrapperActions, 'metaDump').resolves(new Uint8Array(dump));
const saveConfigDump = Sinon.stub(ConfigDumpData, 'saveConfigDump').resolves();
await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
await LibSessionUtil.saveDumpsToDb(groupPk);
expect(saveConfigDump.callCount).to.be.equal(1);
expect(metaDump.callCount).to.be.equal(1);
expect(metaDump.firstCall.args).to.be.deep.eq([groupPk]);
@@ -143,20 +141,20 @@ describe('GroupSyncJob pendingChangesForGroup', () => {
// check for the keys push content
expect(result.messages[0]).to.be.deep.eq({
type: 'GroupKeys',
data: new Uint8Array([3, 2, 1]),
ciphertext: new Uint8Array([3, 2, 1]),
namespace: 13,
});
// check for the info push content
expect(result.messages[1]).to.be.deep.eq({
type: 'GroupInfo',
data: new Uint8Array([1, 2, 3]),
ciphertext: new Uint8Array([1, 2, 3]),
namespace: 12,
seqno: Long.fromInt(pushResults.groupInfo.seqno),
});
// check for the members push content
expect(result.messages[2]).to.be.deep.eq({
type: 'GroupMember',
data: new Uint8Array([1, 2]),
ciphertext: new Uint8Array([1, 2]),
namespace: 14,
seqno: Long.fromInt(pushResults.groupMember.seqno),
});
@@ -247,11 +245,14 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
});
it('no or empty results return empty array', () => {
expect(
GroupSync.resultsToSuccessfulChange(null, { allOldHashes: new Set(), messages: [] })
LibSessionUtil.batchResultsToGroupSuccessfulChange(null, {
allOldHashes: new Set(),
messages: [],
})
).to.be.deep.eq([]);
expect(
GroupSync.resultsToSuccessfulChange([] as any as NotEmptyArrayOfBatchResults, {
LibSessionUtil.batchResultsToGroupSuccessfulChange([] as any as NotEmptyArrayOfBatchResults, {
allOldHashes: new Set(),
messages: [],
})
@@ -262,11 +263,11 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
const member = validMembers(sodium);
const info = validInfo(sodium);
const batchResults: NotEmptyArrayOfBatchResults = [{ code: 200, body: { hash: 'hash1' } }];
const request: GroupSingleDestinationChanges = {
const request: GroupDestinationChanges = {
allOldHashes: new Set(),
messages: [info, member],
};
const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results).to.be.deep.eq([
{
updatedHash: 'hash1',
@@ -282,11 +283,11 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
{ code: 200, body: { hash: 'hash1' } },
{ code: 200, body: { hash: 'hash2' } },
];
const request: GroupSingleDestinationChanges = {
const request: GroupDestinationChanges = {
allOldHashes: new Set(),
messages: [info, member],
};
const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results).to.be.deep.eq([
{
updatedHash: 'hash1',
@@ -306,11 +307,11 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
{ code: 200, body: { hash: 123 as any as string } },
{ code: 200, body: { hash: 'hash2' } },
];
const request: GroupSingleDestinationChanges = {
const request: GroupDestinationChanges = {
allOldHashes: new Set(),
messages: [info, member],
};
const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results).to.be.deep.eq([
{
updatedHash: 'hash2',
@@ -322,16 +323,16 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
it('skip request item without data', () => {
const member = validMembers(sodium);
const info = validInfo(sodium);
const infoNoData = omit(info, 'data');
const infoNoData = omit(info, 'ciphertext');
const batchResults: NotEmptyArrayOfBatchResults = [
{ code: 200, body: { hash: 'hash1' } },
{ code: 200, body: { hash: 'hash2' } },
];
const request: GroupSingleDestinationChanges = {
const request: GroupDestinationChanges = {
allOldHashes: new Set(),
messages: [infoNoData as any as PendingChangesForGroup, member],
};
const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results).to.be.deep.eq([
{
updatedHash: 'hash2',
@@ -347,11 +348,11 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
{ code: 200, body: { hash: 'hash1' } },
{ code: 401, body: { hash: 'hash2' } },
];
const request: GroupSingleDestinationChanges = {
const request: GroupDestinationChanges = {
allOldHashes: new Set(),
messages: [info, member],
};
const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results).to.be.deep.eq([
{
updatedHash: 'hash1',
@@ -362,7 +363,7 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
// another test swapping the results
batchResults[0].code = 401;
batchResults[1].code = 200;
const results2 = GroupSync.resultsToSuccessfulChange(batchResults, request);
const results2 = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
expect(results2).to.be.deep.eq([
{
updatedHash: 'hash2',
@@ -379,7 +380,7 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
let sendStub: TypedStub<typeof MessageSender, 'sendEncryptedDataToSnode'>;
let pendingChangesForGroupStub: TypedStub<typeof LibSessionUtil, 'pendingChangesForGroup'>;
let saveMetaGroupDumpToDbStub: TypedStub<typeof LibSessionUtil, 'saveMetaGroupDumpToDb'>;
let saveDumpsToDbStub: TypedStub<typeof LibSessionUtil, 'saveDumpsToDb'>;
beforeEach(async () => {
sodium = await getSodiumNode();
@@ -389,7 +390,7 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
Sinon.stub(UserUtils, 'getUserED25519KeyPairBytes').resolves(userkeys.ed25519KeyPair);
pendingChangesForGroupStub = Sinon.stub(LibSessionUtil, 'pendingChangesForGroup');
saveMetaGroupDumpToDbStub = Sinon.stub(LibSessionUtil, 'saveMetaGroupDumpToDb');
saveDumpsToDbStub = Sinon.stub(LibSessionUtil, 'saveDumpsToDb');
sendStub = Sinon.stub(MessageSender, 'sendEncryptedDataToSnode');
});
afterEach(() => {
@@ -402,8 +403,8 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
expect(result).to.be.eq(RunJobResult.Success);
expect(sendStub.callCount).to.be.eq(0);
expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(1);
expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
expect(saveDumpsToDbStub.callCount).to.be.eq(1);
expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
});
it('calls sendEncryptedDataToSnode with the right data and retry if network returned nothing', async () => {
@@ -422,11 +423,18 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
expect(result).to.be.eq(RunJobResult.RetryJobIfPossible); // not returning anything in the sendstub so network issue happened
expect(sendStub.callCount).to.be.eq(1);
expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(1);
expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
expect(saveDumpsToDbStub.callCount).to.be.eq(1);
expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
function expected(details: any) {
return { ...pick(details, 'data', 'namespace'), ttl, networkTimestamp, pubkey: groupPk };
console.warn('details', details);
return {
namespace: details.namespace,
data: details.ciphertext,
ttl,
networkTimestamp,
pubkey: groupPk,
};
}
const expectedInfo = expected(info);
@@ -438,7 +446,7 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
]);
});
it('calls sendEncryptedDataToSnode with the right data and retry if network returned nothing', async () => {
it('calls sendEncryptedDataToSnode with the right data (and keys) and retry if network returned nothing', async () => {
const info = validInfo(sodium);
const member = validMembers(sodium);
const keys = validKeys(sodium);
@@ -460,7 +468,7 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
updatedHash: 'hash2',
},
];
Sinon.stub(GroupSync, 'resultsToSuccessfulChange').returns(changes);
Sinon.stub(LibSessionUtil, 'batchResultsToGroupSuccessfulChange').returns(changes);
const metaConfirmPushed = Sinon.stub(MetaGroupWrapperActions, 'metaConfirmPushed').resolves();
sendStub.resolves([
@@ -473,9 +481,9 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
expect(sendStub.callCount).to.be.eq(1);
expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(2);
expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
expect(saveMetaGroupDumpToDbStub.secondCall.args).to.be.deep.eq([groupPk]);
expect(saveDumpsToDbStub.callCount).to.be.eq(2);
expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
expect(saveDumpsToDbStub.secondCall.args).to.be.deep.eq([groupPk]);
expect(metaConfirmPushed.callCount).to.be.eq(1);
expect(metaConfirmPushed.firstCall.args).to.be.deep.eq([
groupPk,
