fix: address PR reviews

pull/2873/head
Audric Ackermann 2 years ago
parent ed16e9b500
commit df3a188074

@@ -34,7 +34,7 @@ window.sessionFeatureFlags = {
integrationTestEnv: Boolean(
process.env.NODE_APP_INSTANCE && process.env.NODE_APP_INSTANCE.includes('test-integration')
),
useClosedGroupV2: true,
useClosedGroupV2: true, // TODO DO NOT MERGE Remove after QA
debug: {
debugLogging: !_.isEmpty(process.env.SESSION_DEBUG),
debugLibsessionDumps: !_.isEmpty(process.env.SESSION_DEBUG_LIBSESSION_DUMPS),
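The temporary flag above is read wherever the v2 and legacy paths diverge; a minimal gating sketch (call site assumed, not part of this diff):
// hedged sketch: branch on the QA-only flag added above
if (window.sessionFeatureFlags.useClosedGroupV2) {
  // new closed-group-v2 code path
} else {
  // legacy closed-group code path
}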

@@ -82,89 +82,6 @@ message DataExtractionNotification {
optional uint64 timestamp = 2;
}
message GroupUpdateInviteMessage {
// @required
required bytes groupIdentityPublicKey = 1;
// @required
required string name = 2;
// @required
required bytes memberSubkey = 3;
// @required
required bytes memberTag = 4;
optional bytes profileKey = 5;
optional LokiProfile profile = 6;
}
message GroupUpdateDeleteMessage {
// @required
required bytes groupIdentityPublicKey = 1;
// @required
required bytes encryptedMemberSubkey = 2;
}
message GroupUpdateInfoChangeMessage {
enum Type {
NAME = 1;
AVATAR = 2;
DISAPPEARING_MESSAGES = 3;
}
// @required
required Type type = 1;
optional string updatedName = 2;
optional uint32 updatedExpiration = 3;
}
message GroupUpdateMemberChangeMessage {
enum Type {
ADDED = 1;
REMOVED = 2;
PROMOTED = 3;
}
// @required
required Type type = 1;
repeated bytes memberPublicKeys = 2;
}
message GroupUpdatePromoteMessage {
// @required
required bytes memberPublicKey = 1;
// @required
required bytes encryptedGroupIdentityPrivateKey = 2;
}
message GroupUpdateMemberLeftMessage {
// the pubkey of the member who left is included as part of the closed group encryption logic (senderIdentity on desktop)
}
message GroupUpdateInviteResponseMessage {
// @required
required bool isApproved = 1; // Whether the request was approved
optional bytes profileKey = 2;
optional LokiProfile profile = 3;
}
message GroupUpdatePromotionResponseMessage {
// @required
required bytes encryptedMemberPublicKey = 1;
}
message GroupUpdateDeleteMemberContentMessage {
repeated bytes memberPublicKeys = 2;
}
message GroupUpdateMessage {
optional GroupUpdateInviteMessage inviteMessage = 31;
optional GroupUpdateDeleteMessage deleteMessage = 32;
optional GroupUpdateInfoChangeMessage infoChangeMessage = 33;
optional GroupUpdateMemberChangeMessage memberChangeMessage = 34;
optional GroupUpdatePromoteMessage promoteMessage = 35;
optional GroupUpdateMemberLeftMessage memberLeftMessage = 36;
optional GroupUpdateInviteResponseMessage inviteResponse = 37;
optional GroupUpdatePromotionResponseMessage promotionResponse = 38;
optional GroupUpdateDeleteMemberContentMessage deleteMemberContent = 39;
}
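The definitions above are removed by this commit. For reference, a message built from them would be nested under GroupUpdateMessage inside a DataMessage; a hypothetical construction sketch, assuming protobufjs-generated SignalService classes and placeholder byte values:
// hypothetical sketch; field names come from the removed .proto above
const invite = new SignalService.GroupUpdateInviteMessage({
  groupIdentityPublicKey, // bytes: group identity pubkey (placeholder)
  name: 'our group',
  memberSubkey, // placeholder bytes
  memberTag, // placeholder bytes
});
const dataMessage = new SignalService.DataMessage({
  groupUpdateMessage: new SignalService.GroupUpdateMessage({ inviteMessage: invite }),
});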
@@ -271,7 +188,7 @@ message DataMessage {
optional ClosedGroupControlMessage closedGroupControlMessage = 104;
optional string syncTarget = 105;
optional bool blocksCommunityMessageRequests = 106;
optional GroupUpdateMessage groupUpdateMessage = 120;
}
message CallMessage {

@@ -119,7 +119,10 @@ async function mergeUserConfigsWithIncomingUpdates(
const variant = LibSessionUtil.userNamespaceToVariant(namespace);
if (window.sessionFeatureFlags.debug.debugLibsessionDumps) {
await printDumpForDebug(`printDumpsForDebugging: before merge of ${variant}:`, variant);
await printDumpForDebug(
`printDumpsForDebugging: before merge of ${toMerge.length}, ${variant}:`,
variant
);
}
const hashesMerged = await GenericWrapperActions.merge(variant, toMerge);
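printDumpForDebug is assumed to be a small local helper along these lines (its body is not part of this diff):
// hypothetical helper, for illustration only
async function printDumpForDebug(prefix: string, variant: ConfigWrapperObjectTypes) {
  const dump = await GenericWrapperActions.dump(variant);
  window.log.info(prefix, Buffer.from(dump).toString('hex')); // hex keeps the binary dump readable in logs
}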
@@ -672,13 +675,13 @@ async function handleSingleGroupUpdateToLeave(toLeave: string) {
*/
async function handleGroupUpdate(latestEnvelopeTimestamp: number) {
// first, let's check which groups need to be joined or left by diffing what is in the wrapper against what is in the DB
const allGoupsInWrapper = await UserGroupsWrapperActions.getAllGroups();
const allGoupsInDb = ConvoHub.use()
const allGroupsInWrapper = await UserGroupsWrapperActions.getAllGroups();
const allGroupsInDb = ConvoHub.use()
.getConversations()
.filter(m => PubKey.isClosedGroupV2(m.id));
const allGoupsIdsInWrapper = allGoupsInWrapper.map(m => m.pubkeyHex);
const allGoupsIdsInDb = allGoupsInDb.map(m => m.id as string);
const allGoupsIdsInWrapper = allGroupsInWrapper.map(m => m.pubkeyHex);
const allGoupsIdsInDb = allGroupsInDb.map(m => m.id as string);
window.log.debug('allGoupsIdsInWrapper', stringify(allGoupsIdsInWrapper));
window.log.debug('allGoupsIdsInDb', stringify(allGoupsIdsInDb));
@@ -687,8 +690,8 @@ async function handleGroupUpdate(latestEnvelopeTimestamp: number) {
throw new Error('userEdKeypair is not set');
}
for (let index = 0; index < allGoupsInWrapper.length; index++) {
const groupInWrapper = allGoupsInWrapper[index];
for (let index = 0; index < allGroupsInWrapper.length; index++) {
const groupInWrapper = allGroupsInWrapper[index];
window.inboxStore.dispatch(groupInfoActions.handleUserGroupUpdate(groupInWrapper));
await handleSingleGroupUpdate({ groupInWrapper, latestEnvelopeTimestamp, userEdKeypair });
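The wrapper/DB diff mentioned in the comment reduces to two set differences; a minimal sketch with lodash, reusing the ids computed above:
import { difference } from 'lodash';
// in the wrapper but not yet in the DB: groups to join
const groupsToJoin = difference(allGoupsIdsInWrapper, allGoupsIdsInDb);
// still in the DB but gone from the wrapper: groups to leave
const groupsToLeave = difference(allGoupsIdsInDb, allGoupsIdsInWrapper);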

@@ -82,9 +82,6 @@ async function handleRequestDetail(
): Promise<void> {
const envelope: any = contentIsEnvelope(data) ? data : SignalService.Envelope.decode(data);
// After this point, decoding errors are not the server's
// fault, and we should handle them gracefully and tell the
// user they received an invalid message
// The message is for a group
if (inConversation) {
const ourNumber = UserUtils.getOurPubKeyStrFromCache();

@@ -41,6 +41,8 @@ import {
RetrieveRequestResult,
} from './types';
const minMsgCountShouldRetry = 95;
export function extractWebSocketContent(message: string): null | Uint8Array {
try {
const dataPlaintext = new Uint8Array(StringUtils.encode(message, 'base64'));
@@ -272,8 +274,8 @@ export class SwarmPolling {
`Polled for group(${ed25519Str(pubkey)}): got ${countMessages} messages back.`
);
let lastPolledTimestamp = Date.now();
if (countMessages >= 95) {
// if we get 95 messages or more back, it means there are probably more than this
if (countMessages >= minMsgCountShouldRetry) {
// if we get `minMsgCountShouldRetry` messages or more back, it means there are probably more than this
// so make sure to retry the polling in the next 5 seconds by marking the last polled timestamp way earlier than it really is
// (this is a kind of hack)
lastPolledTimestamp = Date.now() - SWARM_POLLING_TIMEOUT.INACTIVE - 5 * 1000;
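The hack works because a pubkey is only re-polled once SWARM_POLLING_TIMEOUT.INACTIVE ms have elapsed since its last-polled timestamp; backdating it past that threshold makes the very next tick poll again. A worked sketch:
// after the backdating above, for any positive INACTIVE value:
const elapsed = Date.now() - lastPolledTimestamp; // >= INACTIVE + 5_000
const shouldPollAgain = elapsed >= SWARM_POLLING_TIMEOUT.INACTIVE; // true on the next tick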
@@ -335,7 +337,7 @@ export class SwarmPolling {
}
if (!resultsFromAllNamespaces?.length) {
// Not a single message from any of the polled namespace was retrieve.
// Not a single message from any of the polled namespaces was retrieved.
// We must still mark the current pubkey as "was just polled"
await this.updateLastPollTimestampForPubkey({
countMessages: 0,

@@ -61,3 +61,5 @@ export const UI = {
export const DEFAULT_RECENT_REACTS = ['😂', '🥰', '😢', '😡', '😮', '😈'];
export const MAX_USERNAME_BYTES = 64;
export const UPDATER_INTERVAL_MS = 10 * DURATION.MINUTES;
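Assuming DURATION.MINUTES is 60 * 1000, the new constant matches the literal that the updater hard-coded before this commit; a quick sanity check:
// hedged: assumes DURATION.MINUTES === 60 * 1000
console.assert(UPDATER_INTERVAL_MS === 1000 * 60 * 10); // 600_000 ms, the old hard-coded value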

@@ -52,6 +52,7 @@ export class ClosedGroupVisibleMessage extends ClosedGroupMessage {
type WithDestinationGroupPk = { destination: GroupPubkeyType };
type WithGroupMessageNamespace = { namespace: SnodeNamespaces.ClosedGroupMessages };
// TODO audric This will need to extend ExpirableMessage after Disappearing Messages V2 is merged
export class ClosedGroupV2VisibleMessage extends DataMessage {
private readonly chatMessage: VisibleMessage;
public readonly destination: GroupPubkeyType;
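A hypothetical usage sketch, with the constructor shape inferred from the fields shown here (not confirmed by this diff):
// hypothetical: parameter object shape assumed from the class fields above
const groupMessage = new ClosedGroupV2VisibleMessage({
  chatMessage: visibleMessage, // the wrapped VisibleMessage
  destination: groupPk, // GroupPubkeyType (03-prefixed group pubkey)
  namespace: SnodeNamespaces.ClosedGroupMessages,
});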

@@ -1,8 +1,17 @@
import { AbortSignal } from 'abort-controller';
import { toNumber } from 'lodash';
import pRetry from 'p-retry';
import { AbortSignal } from 'abort-controller';
import { OnionPaths } from '.';
import { Snode } from '../../data/data';
import { fileServerPubKey, fileServerURL } from '../apis/file_server_api/FileServerApi';
import { OpenGroupPollingUtils } from '../apis/open_group_api/opengroupV2/OpenGroupPollingUtils';
import { invalidAuthRequiresBlinding } from '../apis/open_group_api/opengroupV2/OpenGroupServerPoller';
import {
addBinaryContentTypeToHeaders,
addJsonContentTypeToHeaders,
} from '../apis/open_group_api/sogsv3/sogsV3SendMessage';
import { pnServerPubkeyHex, pnServerUrl } from '../apis/push_notification_api/PnServer';
import {
buildErrorMessageWithFailedCode,
FinalDestNonSnodeOptions,
@@ -12,16 +21,7 @@,
STATUS_NO_STATUS,
} from '../apis/snode_api/onions';
import { PROTOCOLS } from '../constants';
import { Snode } from '../../data/data';
import { OnionV4 } from './onionv4';
import { OpenGroupPollingUtils } from '../apis/open_group_api/opengroupV2/OpenGroupPollingUtils';
import {
addBinaryContentTypeToHeaders,
addJsonContentTypeToHeaders,
} from '../apis/open_group_api/sogsv3/sogsV3SendMessage';
import { pnServerPubkeyHex, pnServerUrl } from '../apis/push_notification_api/PnServer';
import { fileServerPubKey, fileServerURL } from '../apis/file_server_api/FileServerApi';
import { invalidAuthRequiresBlinding } from '../apis/open_group_api/opengroupV2/OpenGroupServerPoller';
export type OnionFetchOptions = {
method: string;
@@ -525,6 +525,9 @@ async function sendJsonViaOnionV4ToFileServer(sendOptions: {
return res as OnionV4JSONSnodeResponse;
}
/**
* This is used during stubbing so we can override the time between retries (so the unit tests are faster)
*/
function getMinTimeoutForSogs() {
return 100;
}
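In unit tests this getter would be stubbed so retries back off for milliseconds instead of the production delay; a sketch assuming the function is exported on an OnionSending namespace:
// hedged sketch: shorten the retry delay in tests (export/namespace assumed)
import Sinon from 'sinon';
Sinon.stub(OnionSending, 'getMinTimeoutForSogs').returns(5);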

@@ -26,7 +26,7 @@ const defaultMsBetweenRetries = 15000; // a long time between retries, to avoid
const defaultMaxAttempts = 2;
/**
* We want to run each of those jobs at least 3seconds apart.
* We want to run each of those jobs at least 3 seconds apart.
* So every time one of those jobs finishes, update this timestamp so that, when adding a new job, we know the next minimum date to run it.
*/
const lastRunConfigSyncJobTimestamps = new Map<string, number | null>();
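Scheduling against this map then amounts to waiting out the remainder of the 3-second window; a minimal sketch (helper name hypothetical):
// hedged sketch: ms to wait before a new job for `key` may run
function msUntilNextRun(key: string, minSpacingMs = 3000): number {
  const lastRun = lastRunConfigSyncJobTimestamps.get(key) || 0;
  return Math.max(0, lastRun + minSpacingMs - Date.now());
}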
@@ -54,6 +54,7 @@ async function confirmPushedAndDump(
break;
}
case SnodeNamespaces.ClosedGroupKeys: {
// TODO chunk 2 closed group
break;
}
default:
@@ -70,7 +71,7 @@ async function pushChangesToGroupSwarmIfNeeded(groupPk: GroupPubkeyType): Promis
await LibSessionUtil.saveDumpsToDb(groupPk);
const changesToPush = await LibSessionUtil.pendingChangesForGroup(groupPk);
// If there are no pending changes then the job can just complete (next time something
// is updated we want to try and run immediately so don't scuedule another run in this case)
// is updated we want to try and run immediately so don't schedule another run in this case)
if (isEmpty(changesToPush?.messages)) {
return RunJobResult.Success;
}

@@ -26,7 +26,7 @@ const defaultMsBetweenRetries = 15000; // a long time between retries, to avoid
const defaultMaxAttempts = 2;
/**
* We want to run each of those jobs at least 3seconds apart.
* We want to run each of those jobs at least 3 seconds apart.
* So every time one of those jobs finishes, update this timestamp so that, when adding a new job, we know the next minimum date to run it.
*/
let lastRunConfigSyncJobTimestamp: number | null = null;
@@ -81,7 +81,7 @@ async function pushChangesToUserSwarmIfNeeded() {
const changesToPush = await LibSessionUtil.pendingChangesForUs();
// If there are no pending changes then the job can just complete (next time something
// is updated we want to try and run immediately so don't scuedule another run in this case)
// is updated we want to try and run immediately so don't schedule another run in this case)
if (isEmpty(changesToPush?.messages)) {
triggerConfSyncJobDone();
return RunJobResult.Success;

@@ -2,15 +2,15 @@ import { expect } from 'chai';
import { isUndefined } from 'lodash';
import Sinon from 'sinon';
import { v4 } from 'uuid';
import { sleepFor } from '../../../../../session/utils/Promise';
import { PersistedJobRunner } from '../../../../../session/utils/job_runners/JobRunner';
import { FakeSleepForJob, FakeSleepForMultiJob } from './FakeSleepForJob';
import {
FakeSleepForMultiJobData,
FakeSleepJobData,
} from '../../../../../session/utils/job_runners/PersistedJob';
import { sleepFor } from '../../../../../session/utils/Promise';
import { stubData } from '../../../../test-utils/utils';
import { TestUtils } from '../../../../test-utils';
import { stubData } from '../../../../test-utils/utils';
import { FakeSleepForJob, FakeSleepForMultiJob } from './FakeSleepForJob';
function getFakeSleepForJob(timestamp: number): FakeSleepForJob {
const job = new FakeSleepForJob({
@@ -199,12 +199,6 @@ describe('JobRunner', () => {
expect(runnerMulti.getJobList()).to.deep.eq([job.serializeJob(), job2.serializeJob()]);
expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(job.persistedData.identifier);
// console.info(
// 'runnerMulti.getJobList() initial',
// runnerMulti.getJobList().map(m => m.identifier),
// Date.now()
// );
// each job takes 5s to finish, so let's tick to the point where the first one should be done
clock.tick(5000);
expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(job.persistedData.identifier);
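The tick relies on sinon fake timers; roughly (setup assumed from elsewhere in the test file):
// hedged sketch of the fake-timer setup this test depends on
const clock = Sinon.useFakeTimers({ shouldAdvanceTime: true });
clock.tick(5000); // advance 5s so the first FakeSleepForJob can resolve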

@@ -1,14 +1,16 @@
/* eslint-disable @typescript-eslint/no-misused-promises */
/* eslint-disable no-console */
import * as path from 'path';
import { app, BrowserWindow } from 'electron';
import { autoUpdater, UpdateInfo } from 'electron-updater';
import * as fs from 'fs-extra';
// eslint-disable-next-line import/order
import * as path from 'path';
import { gt as isVersionGreaterThan, parse as parseVersion } from 'semver';
import { windowMarkShouldQuit } from '../node/window_state';
import { getLastestRelease } from '../node/latest_desktop_release';
import { UPDATER_INTERVAL_MS } from '../session/constants';
import {
getPrintableError,
LoggerType,
@@ -40,16 +42,13 @@ export async function start(
autoUpdater.logger = logger;
autoUpdater.autoDownload = false;
interval = global.setInterval(
async () => {
try {
await checkForUpdates(getMainWindow, messages, logger);
} catch (error) {
logger.error('auto-update: error:', getPrintableError(error));
}
},
1000 * 60 * 10
); // trigger and try to update every 10 minutes to let the file gets downloaded if we are updating
interval = global.setInterval(async () => {
try {
await checkForUpdates(getMainWindow, messages, logger);
} catch (error) {
logger.error('auto-update: error:', getPrintableError(error));
}
}, UPDATER_INTERVAL_MS); // trigger and try to update every 10 minutes so the file gets downloaded if we are updating
stopped = false;
global.setTimeout(

@@ -27,10 +27,6 @@
"resolveJsonModule": true,
// Module Resolution Options
// "baseUrl": "./", // Base directory to resolve non-absolute module names.
// "paths": {
// // A series of entries which re-map imports to lookup locations relative to the 'baseUrl'.
// "@/ducks": ["ts/state/ducks/"]
// },
// "rootDirs": [], // List of root folders whose combined content represents the structure of the project at runtime.
// "typeRoots": [], // List of folders to include type definitions from.
// "types": [], // Type declaration files to be included in compilation.
