Remove storage checks and add a workspace readonly state (#282)

* Remove storage checks and add a workspace readonly state
* Make deprecated migrations no-ops
This commit is contained in:
Hakan Shehu
2026-01-09 11:57:52 +01:00
committed by GitHub
parent 84a88cc257
commit be42137b99
84 changed files with 399 additions and 3059 deletions

View File

@@ -31,12 +31,6 @@
}
},
"user": {
"_comment": "Per-user storage limits (in bytes)",
"storageLimit": "10737418240",
"maxFileSize": "104857600"
},
"postgres": {
"_comment": "PostgreSQL database connection",
"url": "env://POSTGRES_URL",
@@ -116,11 +110,5 @@
"logging": {
"_comment": "Logging level: trace, debug, info, warn, error, fatal, silent",
"level": "info"
},
"workspace": {
"_comment": "Per-workspace storage limits (optional)",
"storageLimit": "21474836480",
"maxFileSize": "524288000"
}
}

View File

@@ -4,11 +4,11 @@ import fp from 'fastify-plugin';
import { ApiErrorCode } from '@colanode/core';
import { isDeviceApiRateLimited } from '@colanode/server/lib/rate-limits';
import { parseToken, verifyToken } from '@colanode/server/lib/tokens';
import { RequestAccount } from '@colanode/server/types/api';
import { AccountContext } from '@colanode/server/types/api';
declare module 'fastify' {
interface FastifyRequest {
account: RequestAccount;
account: AccountContext;
}
}

View File

@@ -1,13 +1,18 @@
import { FastifyPluginCallback } from 'fastify';
import fp from 'fastify-plugin';
import { ApiErrorCode, UserStatus } from '@colanode/core';
import {
ApiErrorCode,
UserStatus,
WorkspaceRole,
WorkspaceStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { SelectUser } from '@colanode/server/data/schema';
import { WorkspaceContext } from '@colanode/server/types/api';
declare module 'fastify' {
interface FastifyRequest {
user: SelectUser;
workspace: WorkspaceContext;
}
}
@@ -34,21 +39,45 @@ const workspaceAuthenticatorCallback: FastifyPluginCallback = (
});
}
const user = await database
.selectFrom('users')
.selectAll()
.where('workspace_id', '=', workspaceId)
.where('account_id', '=', request.account.id)
const workspace = await database
.selectFrom('workspaces')
.innerJoin('users', 'workspaces.id', 'users.workspace_id')
.select([
'workspaces.id as workspace_id',
'workspaces.max_file_size as max_file_size',
'workspaces.status as status',
'users.id as user_id',
'users.role as user_role',
'users.status as user_status',
])
.where('workspaces.id', '=', workspaceId)
.where('users.account_id', '=', request.account.id)
.executeTakeFirst();
if (!user || user.status !== UserStatus.Active || user.role === 'none') {
if (
!workspace ||
workspace.status !== WorkspaceStatus.Active ||
workspace.user_role === 'none' ||
workspace.user_status !== UserStatus.Active
) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceNoAccess,
message: 'You do not have access to this workspace.',
});
}
request.user = user;
const context: WorkspaceContext = {
id: workspace.workspace_id,
maxFileSize: workspace.max_file_size,
status: workspace.status,
user: {
id: workspace.user_id,
accountId: request.account.id,
role: workspace.user_role as WorkspaceRole,
},
};
request.workspace = context;
});
done();

View File

@@ -89,12 +89,11 @@ export const accountSyncRoute: FastifyPluginCallbackZod = (
name: workspace.name,
avatar: workspace.avatar,
description: workspace.description,
status: workspace.status,
user: {
id: user.id,
accountId: user.account_id,
role: user.role as WorkspaceRole,
storageLimit: user.storage_limit,
maxFileSize: user.max_file_size,
},
});
}

View File

@@ -59,7 +59,7 @@ export const fileDownloadRoute: FastifyPluginCallbackZod = (
});
}
const role = extractNodeRole(nodes, request.user.id);
const role = extractNodeRole(nodes, request.workspace.user.id);
if (role === null || !hasNodeRole(role, 'viewer')) {
return reply.code(403).send({
code: ApiErrorCode.FileNoAccess,

View File

@@ -2,11 +2,16 @@ import { Server } from '@tus/server';
import { FastifyPluginCallbackZod } from 'fastify-type-provider-zod';
import { z } from 'zod/v4';
import { ApiErrorCode, FileStatus, generateId, IdType } from '@colanode/core';
import {
ApiErrorCode,
FileStatus,
generateId,
IdType,
WorkspaceStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { redis } from '@colanode/server/data/redis';
import { config } from '@colanode/server/lib/config';
import { fetchCounter } from '@colanode/server/lib/counters';
import { generateUrl } from '@colanode/server/lib/fastify';
import { mapNode, updateNode } from '@colanode/server/lib/nodes';
import { storage } from '@colanode/server/lib/storage';
@@ -35,18 +40,12 @@ export const fileUploadTusRoute: FastifyPluginCallbackZod = (
},
handler: async (request, reply) => {
const { workspaceId, fileId } = request.params;
const user = request.user;
const user = request.workspace.user;
const workspace = await database
.selectFrom('workspaces')
.selectAll()
.where('id', '=', workspaceId)
.executeTakeFirst();
if (!workspace) {
return reply.code(404).send({
code: ApiErrorCode.WorkspaceNotFound,
message: 'Workspace not found.',
if (request.workspace.status === WorkspaceStatus.Readonly) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceReadonly,
message: 'Workspace is readonly and you cannot upload files.',
});
}
@@ -105,19 +104,8 @@ export const fileUploadTusRoute: FastifyPluginCallbackZod = (
};
}
if (file.size > BigInt(user.max_file_size)) {
throw {
status_code: 400,
body: JSON.stringify({
code: ApiErrorCode.UserMaxFileSizeExceeded,
message:
'The file size exceeds the maximum allowed size for your account.',
}),
};
}
if (workspace.max_file_size) {
if (file.size > BigInt(workspace.max_file_size)) {
if (request.workspace.maxFileSize) {
if (file.size > BigInt(request.workspace.maxFileSize)) {
throw {
status_code: 400,
body: JSON.stringify({
@@ -129,40 +117,6 @@ export const fileUploadTusRoute: FastifyPluginCallbackZod = (
}
}
const userStorageUsed = await fetchCounter(
database,
`${user.id}.uploads.size`
);
if (userStorageUsed >= BigInt(user.storage_limit)) {
throw {
status_code: 400,
body: JSON.stringify({
code: ApiErrorCode.UserStorageLimitExceeded,
message:
'You have reached the maximum storage limit for your account.',
}),
};
}
if (workspace.storage_limit) {
const workspaceStorageUsed = await fetchCounter(
database,
`${workspaceId}.uploads.size`
);
if (workspaceStorageUsed >= BigInt(workspace.storage_limit)) {
throw {
status_code: 400,
body: JSON.stringify({
code: ApiErrorCode.WorkspaceStorageLimitExceeded,
message:
'The workspace has reached the maximum storage limit for this workspace.',
}),
};
}
}
const createdUpload = await database
.insertInto('uploads')
.returningAll()
@@ -176,7 +130,7 @@ export const fileUploadTusRoute: FastifyPluginCallbackZod = (
path: path,
version_id: file.version,
created_at: new Date(),
created_by: request.user.id,
created_by: request.workspace.user.id,
})
.onConflict((oc) =>
oc.columns(['file_id']).doUpdateSet({
@@ -239,7 +193,7 @@ export const fileUploadTusRoute: FastifyPluginCallbackZod = (
const result = await updateNode({
nodeId: fileId,
userId: request.user.id,
userId: request.workspace.user.id,
workspaceId: workspaceId,
updater(attributes) {
if (attributes.type !== 'file') {

View File

@@ -5,7 +5,6 @@ import { workspaceAuthenticator } from '@colanode/server/api/client/plugins/work
import { fileRoutes } from './files';
import { mutationsRoutes } from './mutations';
import { storageRoutes } from './storage';
import { userRoutes } from './users';
import { workspaceCreateRoute } from './workspace-create';
import { workspaceDeleteRoute } from './workspace-delete';
@@ -28,7 +27,6 @@ export const workspaceRoutes: FastifyPluginCallback = (instance, _, done) => {
subInstance.register(fileRoutes, { prefix: '/files' });
subInstance.register(userRoutes, { prefix: '/users' });
subInstance.register(mutationsRoutes, { prefix: '/mutations' });
subInstance.register(storageRoutes, { prefix: '/storage' });
},
{
prefix: '/:workspaceId',

View File

@@ -5,8 +5,9 @@ import {
MutationStatus,
Mutation,
syncMutationsInputSchema,
ApiErrorCode,
WorkspaceStatus,
} from '@colanode/core';
import { SelectUser } from '@colanode/server/data/schema';
import { updateDocumentFromMutation } from '@colanode/server/lib/documents';
import {
markNodeAsOpened,
@@ -21,6 +22,7 @@ import {
updateNodeFromMutation,
deleteNodeFromMutation,
} from '@colanode/server/lib/nodes';
import { WorkspaceContext } from '@colanode/server/types/api';
export const mutationsSyncRoute: FastifyPluginCallbackZod = (
instance,
@@ -33,14 +35,21 @@ export const mutationsSyncRoute: FastifyPluginCallbackZod = (
schema: {
body: syncMutationsInputSchema,
},
handler: async (request) => {
handler: async (request, reply) => {
const input = request.body;
const user = request.user;
const workspace = request.workspace;
if (workspace.status === WorkspaceStatus.Readonly) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceReadonly,
message: 'Workspace is readonly and you cannot make any changes.',
});
}
const results: SyncMutationResult[] = [];
for (const mutation of input.mutations) {
try {
const status = await handleMutation(user, mutation);
const status = await handleMutation(workspace, mutation);
results.push({
id: mutation.id,
status: status,
@@ -61,25 +70,25 @@ export const mutationsSyncRoute: FastifyPluginCallbackZod = (
};
const handleMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: Mutation
): Promise<MutationStatus> => {
if (mutation.type === 'node.create') {
return await createNodeFromMutation(user, mutation.data);
return await createNodeFromMutation(workspace, mutation.data);
} else if (mutation.type === 'node.update') {
return await updateNodeFromMutation(user, mutation.data);
return await updateNodeFromMutation(workspace, mutation.data);
} else if (mutation.type === 'node.delete') {
return await deleteNodeFromMutation(user, mutation.data);
return await deleteNodeFromMutation(workspace, mutation.data);
} else if (mutation.type === 'node.reaction.create') {
return await createNodeReaction(user, mutation);
return await createNodeReaction(workspace, mutation);
} else if (mutation.type === 'node.reaction.delete') {
return await deleteNodeReaction(user, mutation);
return await deleteNodeReaction(workspace, mutation);
} else if (mutation.type === 'node.interaction.seen') {
return await markNodeAsSeen(user, mutation);
return await markNodeAsSeen(workspace, mutation);
} else if (mutation.type === 'node.interaction.opened') {
return await markNodeAsOpened(user, mutation);
return await markNodeAsOpened(workspace, mutation);
} else if (mutation.type === 'document.update') {
return await updateDocumentFromMutation(user, mutation.data);
return await updateDocumentFromMutation(workspace, mutation.data);
} else {
return MutationStatus.METHOD_NOT_ALLOWED;
}

View File

@@ -1,9 +0,0 @@
import { FastifyPluginCallback } from 'fastify';
import { workspaceStorageGetRoute } from './workspace-storage-get';
// Fastify sub-router for workspace storage endpoints (file deleted by this
// commit); registers the single workspace-storage GET route and signals done.
export const storageRoutes: FastifyPluginCallback = (instance, _, done) => {
instance.register(workspaceStorageGetRoute);
done();
};

View File

@@ -1,120 +0,0 @@
import { FastifyPluginCallbackZod } from 'fastify-type-provider-zod';
import { z } from 'zod/v4';
import {
ApiErrorCode,
apiErrorOutputSchema,
WorkspaceStorageGetOutput,
workspaceStorageGetOutputSchema,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
// Builds the six counter keys tracked per entity id (user or workspace):
// size and count for uploads, nodes and documents.
const buildCounterKeys = (id: string) => [
`${id}.uploads.size`,
`${id}.uploads.count`,
`${id}.nodes.size`,
`${id}.nodes.count`,
`${id}.documents.size`,
`${id}.documents.count`,
];
// Shapes a flat key -> value counter map into the nested usage payload for
// one entity id. Counters missing from the map default to '0' (values are
// kept as strings, presumably to avoid bigint/JSON issues — verify).
const buildUsage = (id: string, counters: Record<string, string>) => ({
uploads: {
size: counters[`${id}.uploads.size`] ?? '0',
count: counters[`${id}.uploads.count`] ?? '0',
},
nodes: {
size: counters[`${id}.nodes.size`] ?? '0',
count: counters[`${id}.nodes.count`] ?? '0',
},
documents: {
size: counters[`${id}.documents.size`] ?? '0',
count: counters[`${id}.documents.count`] ?? '0',
},
});
// Loads the given counter keys from the `counters` table and returns them as
// a key -> value map; null values are coerced to '0'.
const fetchCounterMap = async (keys: string[]) => {
// Guard: an empty list would produce an invalid/empty `IN ()` clause.
if (keys.length === 0) {
return {};
}
const counters = await database
.selectFrom('counters')
.select(['key', 'value'])
.where('key', 'in', keys)
.execute();
return counters.reduce<Record<string, string>>((acc, counter) => {
acc[counter.key] = counter.value ?? '0';
return acc;
}, {});
};
// GET /:workspaceId/storage (file deleted by this commit) — returns the
// calling user's storage limits plus usage counters; workspace owners and
// admins additionally receive workspace-level limits and usage.
export const workspaceStorageGetRoute: FastifyPluginCallbackZod = (
instance,
_,
done
) => {
instance.route({
method: 'GET',
url: '/',
schema: {
params: z.object({
workspaceId: z.string(),
}),
response: {
200: workspaceStorageGetOutputSchema,
400: apiErrorOutputSchema,
403: apiErrorOutputSchema,
404: apiErrorOutputSchema,
},
},
handler: async (request, reply) => {
const workspaceId = request.params.workspaceId;
const user = request.user;
// Workspace-wide figures are only exposed to owners and admins.
const isWorkspaceAdmin = user.role === 'owner' || user.role === 'admin';
const workspace = await database
.selectFrom('workspaces')
.select(['id', 'storage_limit', 'max_file_size'])
.where('id', '=', workspaceId)
.executeTakeFirstOrThrow();
// NOTE(review): executeTakeFirstOrThrow never returns undefined, so this
// 404 branch is unreachable — either use executeTakeFirst or drop it.
if (!workspace) {
return reply.code(404).send({
code: ApiErrorCode.WorkspaceNotFound,
message: 'Workspace not found.',
});
}
const userCounterKeys = buildCounterKeys(user.id);
// Fetch user counters and (for admins only) workspace counters in parallel.
const [userCounterMap, workspaceCounterMap] = await Promise.all([
fetchCounterMap(userCounterKeys),
isWorkspaceAdmin
? fetchCounterMap(buildCounterKeys(workspaceId))
: Promise.resolve<Record<string, string>>({}),
]);
const output: WorkspaceStorageGetOutput = {
user: {
id: user.id,
storageLimit: user.storage_limit,
maxFileSize: user.max_file_size,
usage: buildUsage(user.id, userCounterMap),
},
};
if (isWorkspaceAdmin) {
output.workspace = {
storageLimit: workspace.storage_limit,
maxFileSize: workspace.max_file_size,
usage: buildUsage(workspaceId, workspaceCounterMap),
};
}
return output;
},
});
done();
};

View File

@@ -1,15 +1,11 @@
import { FastifyPluginCallback } from 'fastify';
import { userRoleUpdateRoute } from './user-role-update';
import { userStorageUpdateRoute } from './user-storage-update';
import { usersCreateRoute } from './users-create';
import { usersStorageGetRoute } from './users-storage-get';
export const userRoutes: FastifyPluginCallback = (instance, _, done) => {
instance.register(usersCreateRoute);
instance.register(userRoleUpdateRoute);
instance.register(userStorageUpdateRoute);
instance.register(usersStorageGetRoute);
done();
};

View File

@@ -7,6 +7,7 @@ import {
userRoleUpdateInputSchema,
apiErrorOutputSchema,
userOutputSchema,
WorkspaceStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { eventBus } from '@colanode/server/lib/event-bus';
@@ -34,9 +35,16 @@ export const userRoleUpdateRoute: FastifyPluginCallbackZod = (
handler: async (request, reply) => {
const userId = request.params.userId;
const input = request.body;
const user = request.user;
const workspace = request.workspace;
if (user.role !== 'owner' && user.role !== 'admin') {
if (workspace.status === WorkspaceStatus.Readonly) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceReadonly,
message: 'Workspace is readonly and you cannot update user roles.',
});
}
if (workspace.user.role !== 'owner' && workspace.user.role !== 'admin') {
return reply.code(403).send({
code: ApiErrorCode.UserUpdateNoAccess,
message: 'You do not have access to update users to this workspace.',
@@ -66,7 +74,7 @@ export const userRoleUpdateRoute: FastifyPluginCallbackZod = (
role: input.role,
status,
updated_at: new Date(),
updated_by: user.account_id,
updated_by: request.account.id,
})
.where('id', '=', userToUpdate.id)
.executeTakeFirst();

View File

@@ -1,111 +0,0 @@
import { FastifyPluginCallbackZod } from 'fastify-type-provider-zod';
import { z } from 'zod/v4';
import {
ApiErrorCode,
apiErrorOutputSchema,
userOutputSchema,
userStorageUpdateInputSchema,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { eventBus } from '@colanode/server/lib/event-bus';
// PATCH /:userId/storage (file deleted by this commit) — lets a workspace
// owner or admin set a member's storage limit and max file size (byte counts
// sent as strings), then broadcasts the change on the event bus and returns
// the updated user.
export const userStorageUpdateRoute: FastifyPluginCallbackZod = (
instance,
_,
done
) => {
instance.route({
method: 'PATCH',
url: '/:userId/storage',
schema: {
params: z.object({
userId: z.string(),
}),
body: userStorageUpdateInputSchema,
response: {
200: userOutputSchema,
400: apiErrorOutputSchema,
403: apiErrorOutputSchema,
404: apiErrorOutputSchema,
},
},
handler: async (request, reply) => {
const userId = request.params.userId;
const input = request.body;
const user = request.user;
// Only owners/admins may change other members' storage settings.
if (user.role !== 'owner' && user.role !== 'admin') {
return reply.code(403).send({
code: ApiErrorCode.UserUpdateNoAccess,
message: 'You do not have access to update users to this workspace.',
});
}
const userToUpdate = await database
.selectFrom('users')
.selectAll()
.where('id', '=', userId)
.executeTakeFirst();
if (!userToUpdate) {
return reply.code(404).send({
code: ApiErrorCode.UserNotFound,
message: 'User not found.',
});
}
// BigInt parsing avoids Number precision loss for multi-GB byte counts.
const storageLimit = BigInt(input.storageLimit);
const maxFileSize = BigInt(input.maxFileSize);
// A single file can never be allowed to exceed the total storage quota.
if (maxFileSize > storageLimit) {
return reply.code(400).send({
code: ApiErrorCode.ValidationError,
message: 'Max file size cannot be larger than storage limit.',
});
}
const updatedUser = await database
.updateTable('users')
.returningAll()
.set({
storage_limit: storageLimit.toString(),
max_file_size: maxFileSize.toString(),
updated_at: new Date(),
updated_by: user.account_id,
})
.where('id', '=', userToUpdate.id)
.executeTakeFirst();
// NOTE(review): this failure path responds 400 with a UserNotFound code —
// a mismatched code/status pairing; confirm the intended error contract.
if (!updatedUser) {
return reply.code(400).send({
code: ApiErrorCode.UserNotFound,
message: 'User not found.',
});
}
eventBus.publish({
type: 'user.updated',
userId: userToUpdate.id,
accountId: userToUpdate.account_id,
workspaceId: userToUpdate.workspace_id,
});
return {
id: updatedUser.id,
email: updatedUser.email,
name: updatedUser.name,
avatar: updatedUser.avatar,
role: updatedUser.role,
customName: updatedUser.custom_name,
customAvatar: updatedUser.custom_avatar,
createdAt: updatedUser.created_at.toISOString(),
updatedAt: updatedUser.updated_at?.toISOString() ?? null,
revision: updatedUser.revision,
status: updatedUser.status,
};
},
});
done();
};

View File

@@ -11,10 +11,10 @@ import {
UsersCreateOutput,
usersCreateOutputSchema,
UserStatus,
WorkspaceStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { SelectAccount } from '@colanode/server/data/schema';
import { config } from '@colanode/server/lib/config';
import { eventBus } from '@colanode/server/lib/event-bus';
import { getNameFromEmail } from '@colanode/server/lib/utils';
@@ -41,7 +41,14 @@ export const usersCreateRoute: FastifyPluginCallbackZod = (
handler: async (request, reply) => {
const workspaceId = request.params.workspaceId;
const input = request.body;
const user = request.user;
const workspace = request.workspace;
if (workspace.status === WorkspaceStatus.Readonly) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceReadonly,
message: 'Workspace is readonly and you cannot invite users.',
});
}
if (!input.users || input.users.length === 0) {
return reply.code(400).send({
@@ -50,7 +57,7 @@ export const usersCreateRoute: FastifyPluginCallbackZod = (
});
}
if (user.role !== 'owner' && user.role !== 'admin') {
if (workspace.user.role !== 'owner' && workspace.user.role !== 'admin') {
return reply.code(403).send({
code: ApiErrorCode.UserInviteNoAccess,
message: 'You do not have access to invite users to this workspace.',
@@ -108,11 +115,11 @@ export const usersCreateRoute: FastifyPluginCallbackZod = (
name: account.name,
email: account.email,
avatar: account.avatar,
storage_limit: config.user.storageLimit,
max_file_size: config.user.maxFileSize,
created_at: new Date(),
created_by: request.account.id,
status: UserStatus.Active,
max_file_size: '0',
storage_limit: '0',
})
.executeTakeFirst();

View File

@@ -1,112 +0,0 @@
import { FastifyPluginCallbackZod } from 'fastify-type-provider-zod';
import { z } from 'zod/v4';
import {
ApiErrorCode,
apiErrorOutputSchema,
workspaceStorageUsersGetOutputSchema,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
// Pagination query: page size 1-100 (default 50) plus an optional keyset
// cursor ('after' is the last user id seen on the previous page).
const querySchema = z.object({
limit: z.coerce.number().int().min(1).max(100).default(50),
after: z.string().optional(),
});
// GET /storage (file deleted by this commit) — keyset-paginated listing of
// per-user storage limits and usage counters for a workspace; restricted to
// owners and admins.
export const usersStorageGetRoute: FastifyPluginCallbackZod = (
instance,
_,
done
) => {
instance.route({
method: 'GET',
url: '/storage',
schema: {
params: z.object({
workspaceId: z.string(),
}),
querystring: querySchema,
response: {
200: workspaceStorageUsersGetOutputSchema,
400: apiErrorOutputSchema,
403: apiErrorOutputSchema,
},
},
handler: async (request, reply) => {
const workspaceId = request.params.workspaceId;
const { limit, after } = request.query;
const user = request.user;
if (user.role !== 'owner' && user.role !== 'admin') {
// NOTE(review): reuses UserInviteNoAccess for a storage read — confirm
// this is the intended error code.
return reply.code(403).send({
code: ApiErrorCode.UserInviteNoAccess,
message: 'You do not have access to get user storage.',
});
}
let usersQuery = database
.selectFrom('users')
.select(['id', 'storage_limit', 'max_file_size'])
.where('workspace_id', '=', workspaceId);
// Keyset pagination: resume strictly after the cursor id.
if (after) {
usersQuery = usersQuery.where('id', '>', after);
}
const userRows = await usersQuery.orderBy('id').limit(limit).execute();
const userIds = userRows.map((row) => row.id);
let counterMap: Record<string, string> = {};
// Guard: skip the counters query entirely when the page is empty, since
// an empty `IN ()` list would be invalid.
if (userIds.length > 0) {
const counterKeys = userIds.flatMap((id) => [
`${id}.uploads.size`,
`${id}.uploads.count`,
`${id}.nodes.size`,
`${id}.nodes.count`,
`${id}.documents.size`,
`${id}.documents.count`,
]);
const counters = await database
.selectFrom('counters')
.select(['key', 'value'])
.where('key', 'in', counterKeys)
.execute();
counterMap = counters.reduce<Record<string, string>>((acc, counter) => {
acc[counter.key] = counter.value ?? '0';
return acc;
}, {});
}
// Absent counters are reported as '0'.
const getCounterValue = (key: string) => counterMap[key] ?? '0';
const users = userRows.map((row) => ({
id: row.id,
storageLimit: row.storage_limit,
maxFileSize: row.max_file_size,
usage: {
uploads: {
size: getCounterValue(`${row.id}.uploads.size`),
count: getCounterValue(`${row.id}.uploads.count`),
},
nodes: {
size: getCounterValue(`${row.id}.nodes.size`),
count: getCounterValue(`${row.id}.nodes.count`),
},
documents: {
size: getCounterValue(`${row.id}.documents.size`),
count: getCounterValue(`${row.id}.documents.count`),
},
},
}));
return {
users,
};
},
});
done();
};

View File

@@ -32,7 +32,7 @@ export const workspaceDeleteRoute: FastifyPluginCallbackZod = (
handler: async (request, reply) => {
const workspaceId = request.params.workspaceId;
if (request.user.role !== 'owner') {
if (request.workspace.user.role !== 'owner') {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceDeleteNotAllowed,
message:
@@ -79,12 +79,11 @@ export const workspaceDeleteRoute: FastifyPluginCallbackZod = (
name: workspace.name,
description: workspace.description,
avatar: workspace.avatar,
status: workspace.status,
user: {
id: request.user.id,
accountId: request.user.account_id,
role: request.user.role,
storageLimit: request.user.storage_limit,
maxFileSize: request.user.max_file_size,
id: request.workspace.user.id,
accountId: request.workspace.user.accountId,
role: request.workspace.user.role,
},
};
},

View File

@@ -65,12 +65,11 @@ export const workspaceGetRoute: FastifyPluginCallbackZod = (
name: workspace.name,
description: workspace.description,
avatar: workspace.avatar,
status: workspace.status,
user: {
id: user.id,
accountId: user.account_id,
role: user.role as WorkspaceRole,
storageLimit: user.storage_limit,
maxFileSize: user.max_file_size,
},
};

View File

@@ -7,6 +7,7 @@ import {
apiErrorOutputSchema,
workspaceOutputSchema,
workspaceUpdateInputSchema,
WorkspaceStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { eventBus } from '@colanode/server/lib/event-bus';
@@ -36,7 +37,15 @@ export const workspaceUpdateRoute: FastifyPluginCallbackZod = (
const workspaceId = request.params.workspaceId;
const input = request.body;
if (request.user.role !== 'owner') {
if (request.workspace.status === WorkspaceStatus.Readonly) {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceReadonly,
message:
'Workspace is readonly and you cannot update this workspace.',
});
}
if (request.workspace.user.role !== 'owner') {
return reply.code(403).send({
code: ApiErrorCode.WorkspaceUpdateNotAllowed,
message:
@@ -51,7 +60,7 @@ export const workspaceUpdateRoute: FastifyPluginCallbackZod = (
description: input.description,
avatar: input.avatar,
updated_at: new Date(),
updated_by: request.user.id,
updated_by: request.account.id,
})
.where('id', '=', workspaceId)
.returningAll()
@@ -74,12 +83,11 @@ export const workspaceUpdateRoute: FastifyPluginCallbackZod = (
name: updatedWorkspace.name,
description: updatedWorkspace.description,
avatar: updatedWorkspace.avatar,
status: updatedWorkspace.status,
user: {
id: request.user.id,
accountId: request.user.account_id,
role: request.user.role,
storageLimit: request.user.storage_limit,
maxFileSize: request.user.max_file_size,
id: request.workspace.user.id,
accountId: request.workspace.user.accountId,
role: request.workspace.user.role,
},
};

View File

@@ -1,80 +1,10 @@
import { Migration, sql } from 'kysely';
import { Migration } from 'kysely';
export const createWorkspaceUserCounterTriggers: Migration = {
up: async (db) => {
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at'])
.expression((eb) =>
eb
.selectFrom('users')
.select([
eb
.fn('concat', [
eb.cast(eb.val(''), 'varchar'),
eb.ref('workspace_id'),
eb.cast(eb.val('.users.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(new Date()).as('created_at'),
])
.groupBy('workspace_id')
)
.execute();
// Create trigger function to increment user counter on user insert
await sql`
CREATE OR REPLACE FUNCTION fn_increment_workspace_user_counter() RETURNS TRIGGER AS $$
BEGIN
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (
CONCAT(NEW.workspace_id, '.users.count'),
1,
NOW(),
NOW()
)
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_increment_workspace_user_counter
AFTER INSERT ON users
FOR EACH ROW
EXECUTE FUNCTION fn_increment_workspace_user_counter();
`.execute(db);
// Create trigger function to decrement user counter on user delete
await sql`
CREATE OR REPLACE FUNCTION fn_decrement_workspace_user_counter() RETURNS TRIGGER AS $$
BEGIN
UPDATE counters
SET
value = GREATEST(0, value - 1),
updated_at = NOW()
WHERE key = CONCAT(OLD.workspace_id, '.users.count');
RETURN OLD;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_decrement_workspace_user_counter
AFTER DELETE ON users
FOR EACH ROW
EXECUTE FUNCTION fn_decrement_workspace_user_counter();
`.execute(db);
up: async () => {
// noop - leaving just for backwards compatibility
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_user_counter ON users;
DROP TRIGGER IF EXISTS trg_decrement_workspace_user_counter ON users;
DROP FUNCTION IF EXISTS fn_increment_workspace_user_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_user_counter();
`.execute(db);
down: async () => {
// noop - leaving just for backwards compatibility
},
};

View File

@@ -1,77 +1,10 @@
import { Migration, sql } from 'kysely';
import { Migration } from 'kysely';
export const createWorkspaceNodeCounterTriggers: Migration = {
up: async (db) => {
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at'])
.expression((eb) =>
eb
.selectFrom('nodes')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.nodes.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(new Date()).as('created_at'),
])
.groupBy('workspace_id')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_increment_workspace_node_counter() RETURNS TRIGGER AS $$
BEGIN
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (
CONCAT(NEW.workspace_id, '.nodes.count'),
1,
NOW(),
NOW()
)
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_increment_workspace_node_counter
AFTER INSERT ON nodes
FOR EACH ROW
EXECUTE FUNCTION fn_increment_workspace_node_counter();
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_decrement_workspace_node_counter() RETURNS TRIGGER AS $$
BEGIN
UPDATE counters
SET
value = GREATEST(0, value - 1),
updated_at = NOW()
WHERE key = CONCAT(OLD.workspace_id, '.nodes.count');
RETURN OLD;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_decrement_workspace_node_counter
AFTER DELETE ON nodes
FOR EACH ROW
EXECUTE FUNCTION fn_decrement_workspace_node_counter();
`.execute(db);
up: async () => {
// noop - leaving just for backwards compatibility
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_node_counter ON nodes;
DROP TRIGGER IF EXISTS trg_decrement_workspace_node_counter ON nodes;
DROP FUNCTION IF EXISTS fn_increment_workspace_node_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_node_counter();
`.execute(db);
down: async () => {
// noop - leaving just for backwards compatibility
},
};

View File

@@ -1,104 +1,10 @@
import { Migration, sql } from 'kysely';
import { Migration } from 'kysely';
export const createWorkspaceUploadCounterTriggers: Migration = {
up: async (db) => {
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.storage.used'), 'varchar'),
])
.as('key'),
eb.fn.sum('size').as('value'),
eb.val(new Date()).as('created_at'),
])
.groupBy('workspace_id')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_increment_workspace_storage_counter() RETURNS TRIGGER AS $$
BEGIN
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (
CONCAT(NEW.workspace_id, '.storage.used'),
NEW.size,
NOW(),
NOW()
)
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + NEW.size,
updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_increment_workspace_storage_counter
AFTER INSERT ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_increment_workspace_storage_counter();
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_decrement_workspace_storage_counter() RETURNS TRIGGER AS $$
BEGIN
UPDATE counters
SET
value = GREATEST(0, value - OLD.size),
updated_at = NOW()
WHERE key = CONCAT(OLD.workspace_id, '.storage.used');
RETURN OLD;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_decrement_workspace_storage_counter
AFTER DELETE ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_decrement_workspace_storage_counter();
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_update_workspace_storage_counter() RETURNS TRIGGER AS $$
DECLARE
size_difference BIGINT;
BEGIN
IF OLD.size IS DISTINCT FROM NEW.size THEN
size_difference := NEW.size - OLD.size;
UPDATE counters
SET
value = GREATEST(0, value + size_difference),
updated_at = NOW()
WHERE key = CONCAT(NEW.workspace_id, '.storage.used');
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_update_workspace_storage_counter
AFTER UPDATE ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_update_workspace_storage_counter();
`.execute(db);
up: async () => {
// noop - leaving just for backwards compatibility
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_workspace_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_update_workspace_storage_counter();
`.execute(db);
down: async () => {
// noop - leaving just for backwards compatibility
},
};

View File

@@ -1,104 +1,10 @@
import { Migration, sql } from 'kysely';
import { Migration } from 'kysely';
export const createUserUploadCounterTriggers: Migration = {
up: async (db) => {
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('created_by'),
eb.cast(eb.val('.storage.used'), 'varchar'),
])
.as('key'),
eb.fn.sum('size').as('value'),
eb.val(new Date()).as('created_at'),
])
.groupBy('created_by')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_increment_user_storage_counter() RETURNS TRIGGER AS $$
BEGIN
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (
CONCAT(NEW.created_by, '.storage.used'),
NEW.size,
NOW(),
NOW()
)
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + NEW.size,
updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_increment_user_storage_counter
AFTER INSERT ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_increment_user_storage_counter();
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_decrement_user_storage_counter() RETURNS TRIGGER AS $$
BEGIN
UPDATE counters
SET
value = GREATEST(0, value - OLD.size),
updated_at = NOW()
WHERE key = CONCAT(OLD.created_by, '.storage.used');
RETURN OLD;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_decrement_user_storage_counter
AFTER DELETE ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_decrement_user_storage_counter();
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_update_user_storage_counter() RETURNS TRIGGER AS $$
DECLARE
size_difference BIGINT;
BEGIN
IF OLD.size IS DISTINCT FROM NEW.size THEN
size_difference := NEW.size - OLD.size;
UPDATE counters
SET
value = GREATEST(0, value + size_difference),
updated_at = NOW()
WHERE key = CONCAT(NEW.created_by, '.storage.used');
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_update_user_storage_counter
AFTER UPDATE ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_update_user_storage_counter();
`.execute(db);
up: async () => {
// noop - leaving just for backwards compatibility
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_user_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_user_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_user_storage_counter();
DROP FUNCTION IF EXISTS fn_update_user_storage_counter();
`.execute(db);
down: async () => {
// noop - leaving just for backwards compatibility
},
};

View File

@@ -1,25 +1,10 @@
import { sql, Migration } from 'kysely';
import { Migration } from 'kysely';
export const removeNodeUpdateRevisionTrigger: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_update_node_update_revision ON node_updates;
DROP FUNCTION IF EXISTS update_node_update_revision();
`.execute(db);
up: async () => {
// noop - leaving just for backwards compatibility
},
down: async (db) => {
await sql`
CREATE OR REPLACE FUNCTION update_node_update_revision() RETURNS TRIGGER AS $$
BEGIN
NEW.revision = nextval('node_updates_revision_sequence');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_update_node_update_revision
BEFORE UPDATE ON node_updates
FOR EACH ROW
EXECUTE FUNCTION update_node_update_revision();
`.execute(db);
down: async () => {
// noop - leaving just for backwards compatibility
},
};

View File

@@ -0,0 +1,47 @@
import { Migration, sql } from 'kysely';
export const cleanupCounterTriggers: Migration = {
up: async (db) => {
// Drop triggers and functions from 00026-create-user-upload-counter-triggers.ts
await sql`
DROP TRIGGER IF EXISTS trg_increment_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_user_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_user_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_user_storage_counter();
DROP FUNCTION IF EXISTS fn_update_user_storage_counter();
`.execute(db);
// Drop triggers and functions from 00025-create-workspace-upload-counter-triggers.ts
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_workspace_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_update_workspace_storage_counter();
`.execute(db);
// Drop triggers and functions from 00024-create-workspace-node-counter-triggers.ts
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_node_counter ON nodes;
DROP TRIGGER IF EXISTS trg_decrement_workspace_node_counter ON nodes;
DROP FUNCTION IF EXISTS fn_increment_workspace_node_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_node_counter();
`.execute(db);
// Drop triggers and functions from 00023-create-workspace-user-counter-triggers.ts
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_user_counter ON users;
DROP TRIGGER IF EXISTS trg_decrement_workspace_user_counter ON users;
DROP FUNCTION IF EXISTS fn_increment_workspace_user_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_user_counter();
`.execute(db);
// Drop counters
await db.deleteFrom('counters').execute();
},
down: async () => {
// This migration is destructive (drops triggers). There is no down migration.
},
};

View File

@@ -1,243 +0,0 @@
import { Migration, sql } from 'kysely';
export const createUploadUsageCounterTriggers: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_workspace_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_workspace_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_storage_counter();
DROP FUNCTION IF EXISTS fn_update_workspace_storage_counter();
DROP TRIGGER IF EXISTS trg_increment_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_decrement_user_storage_counter ON uploads;
DROP TRIGGER IF EXISTS trg_update_user_storage_counter ON uploads;
DROP FUNCTION IF EXISTS fn_increment_user_storage_counter();
DROP FUNCTION IF EXISTS fn_decrement_user_storage_counter();
DROP FUNCTION IF EXISTS fn_update_user_storage_counter();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.storage.used')
.execute();
const now = new Date();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.uploads.size'), 'varchar'),
])
.as('key'),
eb.fn.sum('size').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('workspace_id')
)
.execute();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.uploads.count'), 'varchar'),
])
.as('key'),
eb.fn.countAll().as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('workspace_id')
)
.execute();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('created_by'),
eb.cast(eb.val('.uploads.size'), 'varchar'),
])
.as('key'),
eb.fn.sum('size').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('created_by')
)
.execute();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('uploads')
.select([
eb
.fn('concat', [
eb.ref('created_by'),
eb.cast(eb.val('.uploads.count'), 'varchar'),
])
.as('key'),
eb.fn.countAll().as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('created_by')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_update_upload_usage_counters() RETURNS TRIGGER AS $$
DECLARE
workspace_size_key text;
workspace_count_key text;
user_size_key text;
user_count_key text;
old_workspace_size_key text;
old_workspace_count_key text;
old_user_size_key text;
old_user_count_key text;
size_difference bigint;
BEGIN
IF TG_OP = 'INSERT' THEN
workspace_size_key := CONCAT(NEW.workspace_id, '.uploads.size');
workspace_count_key := CONCAT(NEW.workspace_id, '.uploads.count');
user_size_key := CONCAT(NEW.created_by, '.uploads.size');
user_count_key := CONCAT(NEW.created_by, '.uploads.count');
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_size_key, NEW.size, NOW(), NOW()),
(workspace_count_key, 1, NOW(), NOW()),
(user_size_key, NEW.size, NOW(), NOW()),
(user_count_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
workspace_size_key := CONCAT(OLD.workspace_id, '.uploads.size');
workspace_count_key := CONCAT(OLD.workspace_id, '.uploads.count');
user_size_key := CONCAT(OLD.created_by, '.uploads.size');
user_count_key := CONCAT(OLD.created_by, '.uploads.count');
UPDATE counters
SET value = GREATEST(0, value - CASE
WHEN key = workspace_size_key THEN OLD.size
WHEN key = workspace_count_key THEN 1
WHEN key = user_size_key THEN OLD.size
WHEN key = user_count_key THEN 1
END),
updated_at = NOW()
WHERE key IN (workspace_size_key, workspace_count_key, user_size_key, user_count_key);
RETURN OLD;
ELSE
workspace_size_key := CONCAT(NEW.workspace_id, '.uploads.size');
workspace_count_key := CONCAT(NEW.workspace_id, '.uploads.count');
user_size_key := CONCAT(NEW.created_by, '.uploads.size');
user_count_key := CONCAT(NEW.created_by, '.uploads.count');
old_workspace_size_key := CONCAT(OLD.workspace_id, '.uploads.size');
old_workspace_count_key := CONCAT(OLD.workspace_id, '.uploads.count');
old_user_size_key := CONCAT(OLD.created_by, '.uploads.size');
old_user_count_key := CONCAT(OLD.created_by, '.uploads.count');
IF OLD.workspace_id IS DISTINCT FROM NEW.workspace_id THEN
UPDATE counters
SET value = GREATEST(0, value - CASE
WHEN key = old_workspace_size_key THEN OLD.size
WHEN key = old_workspace_count_key THEN 1
END),
updated_at = NOW()
WHERE key IN (old_workspace_size_key, old_workspace_count_key);
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_size_key, NEW.size, NOW(), NOW()),
(workspace_count_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSE
size_difference := NEW.size - OLD.size;
IF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference),
updated_at = NOW()
WHERE key = workspace_size_key;
END IF;
END IF;
IF OLD.created_by IS DISTINCT FROM NEW.created_by THEN
UPDATE counters
SET value = GREATEST(0, value - CASE
WHEN key = old_user_size_key THEN OLD.size
WHEN key = old_user_count_key THEN 1
END),
updated_at = NOW()
WHERE key IN (old_user_size_key, old_user_count_key);
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(user_size_key, NEW.size, NOW(), NOW()),
(user_count_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSE
size_difference := NEW.size - OLD.size;
IF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference),
updated_at = NOW()
WHERE key = user_size_key;
END IF;
END IF;
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_upload_usage_counters ON uploads;
CREATE TRIGGER trg_upload_usage_counters
AFTER INSERT OR UPDATE OR DELETE ON uploads
FOR EACH ROW
EXECUTE FUNCTION fn_update_upload_usage_counters();
`.execute(db);
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_upload_usage_counters ON uploads;
DROP FUNCTION IF EXISTS fn_update_upload_usage_counters();
`.execute(db);
},
};

View File

@@ -1,152 +0,0 @@
import { Migration, sql } from 'kysely';
export const createNodeCounterTriggers: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_increment_workspace_node_counter ON nodes;
DROP TRIGGER IF EXISTS trg_decrement_workspace_node_counter ON nodes;
DROP FUNCTION IF EXISTS fn_increment_workspace_node_counter();
DROP FUNCTION IF EXISTS fn_decrement_workspace_node_counter();
DROP TRIGGER IF EXISTS trg_increment_user_node_counter ON nodes;
DROP TRIGGER IF EXISTS trg_decrement_user_node_counter ON nodes;
DROP FUNCTION IF EXISTS fn_increment_user_node_counter();
DROP FUNCTION IF EXISTS fn_decrement_user_node_counter();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.nodes.count')
.execute();
const now = new Date();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('nodes')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.nodes.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('workspace_id')
)
.execute();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('nodes')
.select([
eb
.fn('concat', [
eb.ref('created_by'),
eb.cast(eb.val('.nodes.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('created_by')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_update_node_counters() RETURNS TRIGGER AS $$
DECLARE
workspace_key text;
user_key text;
old_workspace_key text;
old_user_key text;
BEGIN
IF TG_OP = 'INSERT' THEN
workspace_key := CONCAT(NEW.workspace_id, '.nodes.count');
user_key := CONCAT(NEW.created_by, '.nodes.count');
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_key, 1, NOW(), NOW()),
(user_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
workspace_key := CONCAT(OLD.workspace_id, '.nodes.count');
user_key := CONCAT(OLD.created_by, '.nodes.count');
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key IN (workspace_key, user_key);
RETURN OLD;
ELSE
workspace_key := CONCAT(NEW.workspace_id, '.nodes.count');
user_key := CONCAT(NEW.created_by, '.nodes.count');
old_workspace_key := CONCAT(OLD.workspace_id, '.nodes.count');
old_user_key := CONCAT(OLD.created_by, '.nodes.count');
IF OLD.workspace_id IS DISTINCT FROM NEW.workspace_id THEN
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key = old_workspace_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (workspace_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
END IF;
IF OLD.created_by IS DISTINCT FROM NEW.created_by THEN
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key = old_user_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (user_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
END IF;
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_node_counters ON nodes;
CREATE TRIGGER trg_node_counters
AFTER INSERT OR UPDATE OR DELETE ON nodes
FOR EACH ROW
EXECUTE FUNCTION fn_update_node_counters();
`.execute(db);
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_node_counters ON nodes;
DROP FUNCTION IF EXISTS fn_update_node_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.nodes.count')
.execute();
},
};

View File

@@ -1,146 +0,0 @@
import { Migration, sql } from 'kysely';
export const createDocumentCounterTriggers: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_document_counters ON documents;
DROP FUNCTION IF EXISTS fn_update_document_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.documents.count')
.execute();
const now = new Date();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('documents')
.select([
eb
.fn('concat', [
eb.ref('workspace_id'),
eb.cast(eb.val('.documents.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('workspace_id')
)
.execute();
await db
.insertInto('counters')
.columns(['key', 'value', 'created_at', 'updated_at'])
.expression((eb) =>
eb
.selectFrom('documents')
.select([
eb
.fn('concat', [
eb.ref('created_by'),
eb.cast(eb.val('.documents.count'), 'varchar'),
])
.as('key'),
eb.fn.count('id').as('value'),
eb.val(now).as('created_at'),
eb.val(now).as('updated_at'),
])
.groupBy('created_by')
)
.execute();
await sql`
CREATE OR REPLACE FUNCTION fn_update_document_counters() RETURNS TRIGGER AS $$
DECLARE
workspace_key text;
user_key text;
old_workspace_key text;
old_user_key text;
BEGIN
IF TG_OP = 'INSERT' THEN
workspace_key := CONCAT(NEW.workspace_id, '.documents.count');
user_key := CONCAT(NEW.created_by, '.documents.count');
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_key, 1, NOW(), NOW()),
(user_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
workspace_key := CONCAT(OLD.workspace_id, '.documents.count');
user_key := CONCAT(OLD.created_by, '.documents.count');
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key IN (workspace_key, user_key);
RETURN OLD;
ELSE
workspace_key := CONCAT(NEW.workspace_id, '.documents.count');
user_key := CONCAT(NEW.created_by, '.documents.count');
old_workspace_key := CONCAT(OLD.workspace_id, '.documents.count');
old_user_key := CONCAT(OLD.created_by, '.documents.count');
IF OLD.workspace_id IS DISTINCT FROM NEW.workspace_id THEN
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key = old_workspace_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (workspace_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
END IF;
IF OLD.created_by IS DISTINCT FROM NEW.created_by THEN
UPDATE counters
SET value = GREATEST(0, value - 1), updated_at = NOW()
WHERE key = old_user_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (user_key, 1, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + 1,
updated_at = NOW();
END IF;
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_document_counters ON documents;
CREATE TRIGGER trg_document_counters
AFTER INSERT OR UPDATE OR DELETE ON documents
FOR EACH ROW
EXECUTE FUNCTION fn_update_document_counters();
`.execute(db);
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_document_counters ON documents;
DROP FUNCTION IF EXISTS fn_update_document_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.documents.count')
.execute();
},
};

View File

@@ -1,144 +0,0 @@
import { Migration, sql } from 'kysely';
export const createNodeUpdateSizeCounterTriggers: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_node_update_size_counters ON node_updates;
DROP FUNCTION IF EXISTS fn_update_node_update_size_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.nodes.size')
.execute();
await sql`
INSERT INTO counters (key, value, created_at, updated_at)
SELECT
CONCAT(workspace_id, '.nodes.size') AS key,
COALESCE(SUM(octet_length(data)), 0) AS value,
NOW(),
NOW()
FROM node_updates
GROUP BY workspace_id;
`.execute(db);
await sql`
INSERT INTO counters (key, value, created_at, updated_at)
SELECT
CONCAT(created_by, '.nodes.size') AS key,
COALESCE(SUM(octet_length(data)), 0) AS value,
NOW(),
NOW()
FROM node_updates
GROUP BY created_by;
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_update_node_update_size_counters() RETURNS TRIGGER AS $$
DECLARE
workspace_key text;
user_key text;
old_workspace_key text;
old_user_key text;
new_size bigint := 0;
old_size bigint := 0;
size_difference bigint := 0;
BEGIN
IF TG_OP = 'INSERT' THEN
workspace_key := CONCAT(NEW.workspace_id, '.nodes.size');
user_key := CONCAT(NEW.created_by, '.nodes.size');
new_size := COALESCE(octet_length(NEW.data), 0);
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_key, new_size, NOW(), NOW()),
(user_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
workspace_key := CONCAT(OLD.workspace_id, '.nodes.size');
user_key := CONCAT(OLD.created_by, '.nodes.size');
old_size := COALESCE(octet_length(OLD.data), 0);
UPDATE counters
SET value = GREATEST(0, value - CASE
WHEN key = workspace_key THEN old_size
WHEN key = user_key THEN old_size
END),
updated_at = NOW()
WHERE key IN (workspace_key, user_key);
RETURN OLD;
ELSE
workspace_key := CONCAT(NEW.workspace_id, '.nodes.size');
user_key := CONCAT(NEW.created_by, '.nodes.size');
old_workspace_key := CONCAT(OLD.workspace_id, '.nodes.size');
old_user_key := CONCAT(OLD.created_by, '.nodes.size');
new_size := COALESCE(octet_length(NEW.data), 0);
old_size := COALESCE(octet_length(OLD.data), 0);
size_difference := new_size - old_size;
IF OLD.workspace_id IS DISTINCT FROM NEW.workspace_id THEN
UPDATE counters
SET value = GREATEST(0, value - old_size), updated_at = NOW()
WHERE key = old_workspace_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (workspace_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSIF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference), updated_at = NOW()
WHERE key = workspace_key;
END IF;
IF OLD.created_by IS DISTINCT FROM NEW.created_by THEN
UPDATE counters
SET value = GREATEST(0, value - old_size), updated_at = NOW()
WHERE key = old_user_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (user_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSIF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference), updated_at = NOW()
WHERE key = user_key;
END IF;
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_node_update_size_counters ON node_updates;
CREATE TRIGGER trg_node_update_size_counters
AFTER INSERT OR UPDATE OR DELETE ON node_updates
FOR EACH ROW
EXECUTE FUNCTION fn_update_node_update_size_counters();
`.execute(db);
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_node_update_size_counters ON node_updates;
DROP FUNCTION IF EXISTS fn_update_node_update_size_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.nodes.size')
.execute();
},
};

View File

@@ -1,144 +0,0 @@
import { Migration, sql } from 'kysely';
export const createDocumentUpdateSizeCounterTriggers: Migration = {
up: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_document_update_size_counters ON document_updates;
DROP FUNCTION IF EXISTS fn_update_document_update_size_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.documents.size')
.execute();
await sql`
INSERT INTO counters (key, value, created_at, updated_at)
SELECT
CONCAT(workspace_id, '.documents.size') AS key,
COALESCE(SUM(octet_length(data)), 0) AS value,
NOW(),
NOW()
FROM document_updates
GROUP BY workspace_id;
`.execute(db);
await sql`
INSERT INTO counters (key, value, created_at, updated_at)
SELECT
CONCAT(created_by, '.documents.size') AS key,
COALESCE(SUM(octet_length(data)), 0) AS value,
NOW(),
NOW()
FROM document_updates
GROUP BY created_by;
`.execute(db);
await sql`
CREATE OR REPLACE FUNCTION fn_update_document_update_size_counters() RETURNS TRIGGER AS $$
DECLARE
workspace_key text;
user_key text;
old_workspace_key text;
old_user_key text;
new_size bigint := 0;
old_size bigint := 0;
size_difference bigint := 0;
BEGIN
IF TG_OP = 'INSERT' THEN
workspace_key := CONCAT(NEW.workspace_id, '.documents.size');
user_key := CONCAT(NEW.created_by, '.documents.size');
new_size := COALESCE(octet_length(NEW.data), 0);
INSERT INTO counters (key, value, created_at, updated_at)
VALUES
(workspace_key, new_size, NOW(), NOW()),
(user_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
RETURN NEW;
ELSIF TG_OP = 'DELETE' THEN
workspace_key := CONCAT(OLD.workspace_id, '.documents.size');
user_key := CONCAT(OLD.created_by, '.documents.size');
old_size := COALESCE(octet_length(OLD.data), 0);
UPDATE counters
SET value = GREATEST(0, value - CASE
WHEN key = workspace_key THEN old_size
WHEN key = user_key THEN old_size
END),
updated_at = NOW()
WHERE key IN (workspace_key, user_key);
RETURN OLD;
ELSE
workspace_key := CONCAT(NEW.workspace_id, '.documents.size');
user_key := CONCAT(NEW.created_by, '.documents.size');
old_workspace_key := CONCAT(OLD.workspace_id, '.documents.size');
old_user_key := CONCAT(OLD.created_by, '.documents.size');
new_size := COALESCE(octet_length(NEW.data), 0);
old_size := COALESCE(octet_length(OLD.data), 0);
size_difference := new_size - old_size;
IF OLD.workspace_id IS DISTINCT FROM NEW.workspace_id THEN
UPDATE counters
SET value = GREATEST(0, value - old_size), updated_at = NOW()
WHERE key = old_workspace_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (workspace_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSIF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference), updated_at = NOW()
WHERE key = workspace_key;
END IF;
IF OLD.created_by IS DISTINCT FROM NEW.created_by THEN
UPDATE counters
SET value = GREATEST(0, value - old_size), updated_at = NOW()
WHERE key = old_user_key;
INSERT INTO counters (key, value, created_at, updated_at)
VALUES (user_key, new_size, NOW(), NOW())
ON CONFLICT (key)
DO UPDATE SET
value = counters.value + EXCLUDED.value,
updated_at = NOW();
ELSIF size_difference <> 0 THEN
UPDATE counters
SET value = GREATEST(0, value + size_difference), updated_at = NOW()
WHERE key = user_key;
END IF;
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_document_update_size_counters ON document_updates;
CREATE TRIGGER trg_document_update_size_counters
AFTER INSERT OR UPDATE OR DELETE ON document_updates
FOR EACH ROW
EXECUTE FUNCTION fn_update_document_update_size_counters();
`.execute(db);
},
down: async (db) => {
await sql`
DROP TRIGGER IF EXISTS trg_document_update_size_counters ON document_updates;
DROP FUNCTION IF EXISTS fn_update_document_update_size_counters();
`.execute(db);
await db
.deleteFrom('counters')
.where('key', 'like', '%.documents.size')
.execute();
},
};

View File

@@ -31,11 +31,7 @@ import { removeDocumentUpdateRevisionTrigger } from './00028-remove-document-upd
import { addWorkspaceStorageLimitColumns } from './00029-add-workspace-storage-limit-columns';
import { addWorkspaceIndexToUploads } from './00030-add-workspace-index-to-uploads';
import { addCreatedAtIndexToUploads } from './00031-add-created-at-index-to-uploads';
import { createUploadUsageCounterTriggers } from './00032-create-upload-usage-counter-triggers';
import { createNodeCounterTriggers } from './00033-create-node-counter-triggers';
import { createDocumentCounterTriggers } from './00034-create-document-counter-triggers';
import { createNodeUpdateSizeCounterTriggers } from './00035-create-node-update-size-counter-triggers';
import { createDocumentUpdateSizeCounterTriggers } from './00036-create-document-update-size-counter-triggers';
import { cleanupCounterTriggers } from './00032-cleanup-counter-triggers';
export const databaseMigrations: Record<string, Migration> = {
'00001_create_accounts_table': createAccountsTable,
@@ -73,11 +69,5 @@ export const databaseMigrations: Record<string, Migration> = {
'00029_add_workspace_storage_limit_columns': addWorkspaceStorageLimitColumns,
'00030_add_workspace_index_to_uploads': addWorkspaceIndexToUploads,
'00031_add_created_at_index_to_uploads': addCreatedAtIndexToUploads,
'00032_create_upload_usage_counter_triggers': createUploadUsageCounterTriggers,
'00033_create_node_counter_triggers': createNodeCounterTriggers,
'00034_create_document_counter_triggers': createDocumentCounterTriggers,
'00035_create_node_update_size_counter_triggers':
createNodeUpdateSizeCounterTriggers,
'00036_create_document_update_size_counter_triggers':
createDocumentUpdateSizeCounterTriggers,
'00032_cleanup_counter_triggers': cleanupCounterTriggers,
};

View File

@@ -69,7 +69,6 @@ interface WorkspaceTable {
created_by: ColumnType<string, string, never>;
updated_by: ColumnType<string | null, string | null, string>;
status: ColumnType<number, number, number>;
storage_limit: ColumnType<string | null, string | null, string | null>;
max_file_size: ColumnType<string | null, string | null, string | null>;
}

View File

@@ -86,12 +86,11 @@ export const buildLoginSuccessOutput = async (
name: workspace.name,
avatar: workspace.avatar,
description: workspace.description,
status: workspace.status,
user: {
id: user.id,
accountId: user.account_id,
role: user.role as WorkspaceRole,
storageLimit: user.storage_limit,
maxFileSize: user.max_file_size,
},
});
}

View File

@@ -10,13 +10,11 @@ import { postgresConfigSchema } from './postgres';
import { redisConfigSchema } from './redis';
import { serverConfigSchema } from './server';
import { storageConfigSchema } from './storage';
import { userConfigSchema } from './user';
import { workspaceConfigSchema } from './workspace';
const configSchema = z.object({
server: serverConfigSchema,
account: accountConfigSchema,
user: userConfigSchema,
postgres: postgresConfigSchema,
redis: redisConfigSchema,
storage: storageConfigSchema,

View File

@@ -1,6 +1,7 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'node:url';
import path from 'path';
import { Configuration } from './index';
type ConfigSource = Partial<Configuration>;

View File

@@ -1,8 +0,0 @@
import { z } from 'zod/v4';
export const userConfigSchema = z.object({
storageLimit: z.string().default('10737418240'),
maxFileSize: z.string().default('104857600'),
});
export type UserConfig = z.infer<typeof userConfigSchema>;

View File

@@ -1,7 +1,6 @@
import { z } from 'zod/v4';
export const workspaceConfigSchema = z.object({
storageLimit: z.string().optional().nullable(),
maxFileSize: z.string().optional().nullable(),
});

View File

@@ -3,12 +3,6 @@ import { Kysely, Transaction } from 'kysely';
import { DatabaseSchema } from '@colanode/server/data/schema';
export type CounterKey =
| `${string}.uploads.size`
| `${string}.uploads.count`
| `${string}.nodes.count`
| `${string}.nodes.size`
| `${string}.documents.count`
| `${string}.documents.size`
| `node.updates.merge.cursor`
| `document.updates.merge.cursor`;

View File

@@ -11,10 +11,10 @@ import {
} from '@colanode/core';
import { decodeState, YDoc } from '@colanode/crdt';
import { database } from '@colanode/server/data/database';
import { SelectUser } from '@colanode/server/data/schema';
import { eventBus } from '@colanode/server/lib/event-bus';
import { createLogger } from '@colanode/server/lib/logger';
import { fetchNode, fetchNodeTree, mapNode } from '@colanode/server/lib/nodes';
import { WorkspaceContext } from '@colanode/server/types/api';
import {
CreateDocumentInput,
CreateDocumentOutput,
@@ -114,7 +114,7 @@ export const createDocument = async (
};
export const updateDocumentFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: UpdateDocumentMutationData
): Promise<MutationStatus> => {
for (let count = 0; count < UPDATE_RETRIES_LIMIT; count++) {
@@ -128,7 +128,7 @@ export const updateDocumentFromMutation = async (
return MutationStatus.OK;
}
const result = await tryUpdateDocumentFromMutation(user, mutation);
const result = await tryUpdateDocumentFromMutation(workspace, mutation);
if (result.type === 'success') {
return result.output;
@@ -143,7 +143,7 @@ export const updateDocumentFromMutation = async (
};
const tryUpdateDocumentFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: UpdateDocumentMutationData
): Promise<ConcurrentUpdateResult<MutationStatus>> => {
const tree = await fetchNodeTree(mutation.documentId);
@@ -163,10 +163,10 @@ const tryUpdateDocumentFromMutation = async (
const context: CanUpdateDocumentContext = {
user: {
id: user.id,
role: user.role,
workspaceId: user.workspace_id,
accountId: user.account_id,
id: workspace.user.id,
role: workspace.user.role,
workspaceId: workspace.id,
accountId: workspace.user.accountId,
},
node: mapNode(node),
tree: tree.map((node) => mapNode(node)),
@@ -211,10 +211,10 @@ const tryUpdateDocumentFromMutation = async (
id: mutation.updateId,
document_id: mutation.documentId,
root_id: node.root_id,
workspace_id: user.workspace_id,
workspace_id: workspace.id,
data: decodeState(mutation.data),
created_at: new Date(mutation.createdAt),
created_by: user.id,
created_by: workspace.user.id,
merged_updates: null,
})
.executeTakeFirst();
@@ -230,7 +230,7 @@ const tryUpdateDocumentFromMutation = async (
.set({
content: JSON.stringify(content),
updated_at: new Date(mutation.createdAt),
updated_by: user.id,
updated_by: workspace.user.id,
revision: createdDocumentUpdate.revision,
})
.where('id', '=', mutation.documentId)
@@ -241,10 +241,10 @@ const tryUpdateDocumentFromMutation = async (
.returningAll()
.values({
id: mutation.documentId,
workspace_id: user.workspace_id,
workspace_id: workspace.id,
content: JSON.stringify(content),
created_at: new Date(mutation.createdAt),
created_by: user.id,
created_by: workspace.user.id,
revision: createdDocumentUpdate.revision,
})
.onConflict((cb) => cb.doNothing())
@@ -267,14 +267,14 @@ const tryUpdateDocumentFromMutation = async (
eventBus.publish({
type: 'document.updated',
documentId: mutation.documentId,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
eventBus.publish({
type: 'document.update.created',
documentId: mutation.documentId,
rootId: node.root_id,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
return {

View File

@@ -6,12 +6,12 @@ import {
MutationStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { SelectUser } from '@colanode/server/data/schema';
import { eventBus } from '@colanode/server/lib/event-bus';
import { mapNode } from '@colanode/server/lib/nodes';
import { WorkspaceContext } from '@colanode/server/types/api';
export const markNodeAsSeen = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: NodeInteractionSeenMutation
): Promise<MutationStatus> => {
const node = await database
@@ -35,7 +35,7 @@ export const markNodeAsSeen = async (
}
const rootNode = mapNode(root);
const role = extractNodeRole(rootNode, user.id);
const role = extractNodeRole(rootNode, workspace.user.id);
if (!role || !hasNodeRole(role, 'viewer')) {
return MutationStatus.FORBIDDEN;
}
@@ -44,7 +44,7 @@ export const markNodeAsSeen = async (
.selectFrom('node_interactions')
.selectAll()
.where('node_id', '=', mutation.data.nodeId)
.where('collaborator_id', '=', user.id)
.where('collaborator_id', '=', workspace.user.id)
.executeTakeFirst();
if (
@@ -62,7 +62,7 @@ export const markNodeAsSeen = async (
.returningAll()
.values({
node_id: mutation.data.nodeId,
collaborator_id: user.id,
collaborator_id: workspace.user.id,
first_seen_at: firstSeenAt,
last_seen_at: lastSeenAt,
root_id: root.id,
@@ -92,7 +92,7 @@ export const markNodeAsSeen = async (
};
export const markNodeAsOpened = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: NodeInteractionOpenedMutation
): Promise<MutationStatus> => {
const node = await database
@@ -116,7 +116,7 @@ export const markNodeAsOpened = async (
}
const rootNode = mapNode(root);
const role = extractNodeRole(rootNode, user.id);
const role = extractNodeRole(rootNode, workspace.user.id);
if (!role || !hasNodeRole(role, 'viewer')) {
return MutationStatus.FORBIDDEN;
}
@@ -125,7 +125,7 @@ export const markNodeAsOpened = async (
.selectFrom('node_interactions')
.selectAll()
.where('node_id', '=', mutation.data.nodeId)
.where('collaborator_id', '=', user.id)
.where('collaborator_id', '=', workspace.user.id)
.executeTakeFirst();
if (
@@ -143,7 +143,7 @@ export const markNodeAsOpened = async (
.returningAll()
.values({
node_id: mutation.data.nodeId,
collaborator_id: user.id,
collaborator_id: workspace.user.id,
first_opened_at: firstOpenedAt,
last_opened_at: lastOpenedAt,
root_id: root.id,

View File

@@ -6,12 +6,12 @@ import {
MutationStatus,
} from '@colanode/core';
import { database } from '@colanode/server/data/database';
import { SelectUser } from '@colanode/server/data/schema';
import { eventBus } from '@colanode/server/lib/event-bus';
import { fetchNodeTree, mapNode } from '@colanode/server/lib/nodes';
import { WorkspaceContext } from '@colanode/server/types/api';
export const createNodeReaction = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: CreateNodeReactionMutation
): Promise<MutationStatus> => {
const tree = await fetchNodeTree(mutation.data.nodeId);
@@ -32,10 +32,10 @@ export const createNodeReaction = async (
const model = getNodeModel(node.type);
const context: CanReactNodeContext = {
user: {
id: user.id,
role: user.role,
accountId: user.account_id,
workspaceId: user.workspace_id,
id: workspace.user.id,
role: workspace.user.role,
accountId: workspace.user.accountId,
workspaceId: workspace.id,
},
tree: tree.map(mapNode),
node: mapNode(node),
@@ -50,7 +50,7 @@ export const createNodeReaction = async (
.returningAll()
.values({
node_id: mutation.data.nodeId,
collaborator_id: user.id,
collaborator_id: workspace.user.id,
reaction: mutation.data.reaction,
workspace_id: root.workspace_id,
root_id: root.id,
@@ -80,7 +80,7 @@ export const createNodeReaction = async (
};
export const deleteNodeReaction = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: DeleteNodeReactionMutation
): Promise<MutationStatus> => {
const tree = await fetchNodeTree(mutation.data.nodeId);
@@ -101,10 +101,10 @@ export const deleteNodeReaction = async (
const model = getNodeModel(node.type);
const context: CanReactNodeContext = {
user: {
id: user.id,
role: user.role,
accountId: user.account_id,
workspaceId: user.workspace_id,
id: workspace.user.id,
role: workspace.user.role,
accountId: workspace.user.accountId,
workspaceId: workspace.id,
},
tree: tree.map(mapNode),
node: mapNode(node),
@@ -120,7 +120,7 @@ export const deleteNodeReaction = async (
deleted_at: new Date(mutation.data.deletedAt),
})
.where('node_id', '=', mutation.data.nodeId)
.where('collaborator_id', '=', user.id)
.where('collaborator_id', '=', workspace.user.id)
.where('reaction', '=', mutation.data.reaction)
.executeTakeFirst();
@@ -131,7 +131,7 @@ export const deleteNodeReaction = async (
eventBus.publish({
type: 'node.reaction.deleted',
nodeId: mutation.data.nodeId,
collaboratorId: user.id,
collaboratorId: workspace.user.id,
rootId: node.root_id,
workspaceId: node.workspace_id,
});

View File

@@ -22,7 +22,6 @@ import {
SelectCollaboration,
SelectNode,
SelectNodeUpdate,
SelectUser,
} from '@colanode/server/data/schema';
import {
applyCollaboratorUpdates,
@@ -32,6 +31,7 @@ import { eventBus } from '@colanode/server/lib/event-bus';
import { createLogger } from '@colanode/server/lib/logger';
import { storage } from '@colanode/server/lib/storage';
import { jobService } from '@colanode/server/services/job-service';
import { WorkspaceContext } from '@colanode/server/types/api';
import {
ConcurrentUpdateResult,
CreateNodeInput,
@@ -348,7 +348,7 @@ export const tryUpdateNode = async (
};
export const createNodeFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: CreateNodeMutationData
): Promise<MutationStatus> => {
const existingNode = await fetchNode(mutation.nodeId);
@@ -369,10 +369,10 @@ export const createNodeFromMutation = async (
const tree = parentId ? await fetchNodeTree(parentId) : [];
const canCreateNodeContext: CanCreateNodeContext = {
user: {
id: user.id,
role: user.role,
workspaceId: user.workspace_id,
accountId: user.account_id,
id: workspace.user.id,
role: workspace.user.role,
workspaceId: workspace.id,
accountId: workspace.user.accountId,
},
tree: tree.map(mapNode),
attributes,
@@ -388,10 +388,10 @@ export const createNodeFromMutation = async (
).map(([userId, role]) => ({
collaborator_id: userId,
node_id: mutation.nodeId,
workspace_id: user.workspace_id,
workspace_id: workspace.id,
role,
created_at: new Date(),
created_by: user.id,
created_by: workspace.user.id,
}));
try {
@@ -405,10 +405,10 @@ export const createNodeFromMutation = async (
id: mutation.updateId,
node_id: mutation.nodeId,
root_id: rootId,
workspace_id: user.workspace_id,
workspace_id: workspace.id,
data: ydoc.getState(),
created_at: new Date(mutation.createdAt),
created_by: user.id,
created_by: workspace.user.id,
})
.executeTakeFirst();
@@ -423,9 +423,9 @@ export const createNodeFromMutation = async (
id: mutation.nodeId,
root_id: rootId,
attributes: JSON.stringify(attributes),
workspace_id: user.workspace_id,
workspace_id: workspace.id,
created_at: new Date(mutation.createdAt),
created_by: user.id,
created_by: workspace.user.id,
revision: createdNodeUpdate.revision,
})
.executeTakeFirst();
@@ -451,7 +451,7 @@ export const createNodeFromMutation = async (
type: 'node.created',
nodeId: mutation.nodeId,
rootId,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
for (const createdCollaboration of createdCollaborations) {
@@ -459,7 +459,7 @@ export const createNodeFromMutation = async (
type: 'collaboration.created',
collaboratorId: createdCollaboration.collaborator_id,
nodeId: mutation.nodeId,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
}
@@ -471,7 +471,7 @@ export const createNodeFromMutation = async (
};
export const updateNodeFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: UpdateNodeMutationData
): Promise<MutationStatus> => {
for (let count = 0; count < UPDATE_RETRIES_LIMIT; count++) {
@@ -485,7 +485,7 @@ export const updateNodeFromMutation = async (
return MutationStatus.OK;
}
const result = await tryUpdateNodeFromMutation(user, mutation);
const result = await tryUpdateNodeFromMutation(workspace, mutation);
if (result.type === 'success') {
return result.output;
@@ -500,7 +500,7 @@ export const updateNodeFromMutation = async (
};
const tryUpdateNodeFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: UpdateNodeMutationData
): Promise<ConcurrentUpdateResult<MutationStatus>> => {
const tree = await fetchNodeTree(mutation.nodeId);
@@ -527,10 +527,10 @@ const tryUpdateNodeFromMutation = async (
const canUpdateNodeContext: CanUpdateAttributesContext = {
user: {
id: user.id,
role: user.role,
workspaceId: user.workspace_id,
accountId: user.account_id,
id: workspace.user.id,
role: workspace.user.role,
workspaceId: workspace.id,
accountId: workspace.user.accountId,
},
tree: tree.map(mapNode),
node: mapNode(node),
@@ -558,10 +558,10 @@ const tryUpdateNodeFromMutation = async (
id: mutation.updateId,
node_id: mutation.nodeId,
root_id: node.root_id,
workspace_id: user.workspace_id,
workspace_id: workspace.id,
data: update,
created_at: new Date(mutation.createdAt),
created_by: user.id,
created_by: workspace.user.id,
})
.executeTakeFirst();
@@ -575,7 +575,7 @@ const tryUpdateNodeFromMutation = async (
.set({
attributes: attributesJson,
updated_at: new Date(mutation.createdAt),
updated_by: user.id,
updated_by: workspace.user.id,
revision: createdNodeUpdate.revision,
})
.where('id', '=', mutation.nodeId)
@@ -590,8 +590,8 @@ const tryUpdateNodeFromMutation = async (
await applyCollaboratorUpdates(
trx,
mutation.nodeId,
user.id,
user.workspace_id,
workspace.user.id,
workspace.id,
collaboratorChanges
);
@@ -606,7 +606,7 @@ const tryUpdateNodeFromMutation = async (
type: 'node.updated',
nodeId: mutation.nodeId,
rootId: node.root_id,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
for (const createdCollaboration of createdCollaborations) {
@@ -614,7 +614,7 @@ const tryUpdateNodeFromMutation = async (
type: 'collaboration.created',
collaboratorId: createdCollaboration.collaborator_id,
nodeId: mutation.nodeId,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
}
@@ -623,7 +623,7 @@ const tryUpdateNodeFromMutation = async (
type: 'collaboration.updated',
collaboratorId: updatedCollaboration.collaborator_id,
nodeId: mutation.nodeId,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
}
@@ -634,7 +634,7 @@ const tryUpdateNodeFromMutation = async (
};
export const deleteNodeFromMutation = async (
user: SelectUser,
workspace: WorkspaceContext,
mutation: DeleteNodeMutationData
): Promise<MutationStatus> => {
const tree = await fetchNodeTree(mutation.nodeId);
@@ -650,10 +650,10 @@ export const deleteNodeFromMutation = async (
const model = getNodeModel(node.type);
const canDeleteNodeContext: CanDeleteNodeContext = {
user: {
id: user.id,
role: user.role,
workspaceId: user.workspace_id,
accountId: user.account_id,
id: workspace.user.id,
role: workspace.user.role,
workspaceId: workspace.id,
accountId: workspace.user.accountId,
},
tree: tree.map(mapNode),
node: mapNode(node),
@@ -682,7 +682,7 @@ export const deleteNodeFromMutation = async (
root_id: node.root_id,
workspace_id: node.workspace_id,
deleted_at: new Date(mutation.deletedAt),
deleted_by: user.id,
deleted_by: workspace.user.id,
})
.executeTakeFirst();
@@ -716,15 +716,15 @@ export const deleteNodeFromMutation = async (
type: 'node.deleted',
nodeId: mutation.nodeId,
rootId: node.root_id,
workspaceId: user.workspace_id,
workspaceId: workspace.id,
});
await jobService.addJob({
type: 'node.clean',
nodeId: mutation.nodeId,
parentId: node.parent_id,
workspaceId: user.workspace_id,
userId: user.id,
workspaceId: workspace.id,
userId: workspace.user.id,
});
return MutationStatus.OK;

View File

@@ -2,7 +2,7 @@ import { sha256 } from 'js-sha256';
import { database } from '@colanode/server/data/database';
import { uuid } from '@colanode/server/lib/utils';
import { RequestAccount } from '@colanode/server/types/api';
import { AccountContext } from '@colanode/server/types/api';
const DEVICE_TOKEN_PREFIX = 'cnd_';
@@ -23,7 +23,7 @@ type VerifyTokenResult =
}
| {
authenticated: true;
account: RequestAccount;
account: AccountContext;
};
export const generateToken = (deviceId: string): GenerateTokenResult => {

View File

@@ -36,7 +36,6 @@ export const createWorkspace = async (
created_at: date,
created_by: account.id,
status: WorkspaceStatus.Active,
storage_limit: config.workspace.storageLimit,
max_file_size: config.workspace.maxFileSize,
})
.returningAll()
@@ -56,11 +55,11 @@ export const createWorkspace = async (
name: account.name,
email: account.email,
avatar: account.avatar,
storage_limit: config.user.storageLimit,
max_file_size: config.user.maxFileSize,
created_at: date,
created_by: account.id,
status: UserStatus.Active,
max_file_size: '0',
storage_limit: '0',
})
.returningAll()
.executeTakeFirst();
@@ -154,12 +153,11 @@ export const createWorkspace = async (
name: workspace.name,
description: workspace.description,
avatar: workspace.avatar,
status: workspace.status,
user: {
id: user.id,
accountId: user.account_id,
role: user.role,
storageLimit: user.storage_limit,
maxFileSize: user.max_file_size,
},
};
};

View File

@@ -4,7 +4,7 @@ import { generateId, IdType } from '@colanode/core';
import { redis } from '@colanode/server/data/redis';
import { eventBus } from '@colanode/server/lib/event-bus';
import { SocketConnection } from '@colanode/server/services/socket-connection';
import { ClientContext, RequestAccount } from '@colanode/server/types/api';
import { ClientContext, AccountContext } from '@colanode/server/types/api';
import { SocketContext } from '@colanode/server/types/sockets';
class SocketService {
@@ -28,7 +28,7 @@ class SocketService {
});
}
public async initSocket(account: RequestAccount, client: ClientContext) {
public async initSocket(account: AccountContext, client: ClientContext) {
const id = generateId(IdType.Socket);
const context: SocketContext = {
id,

View File

@@ -1,8 +1,21 @@
export type RequestAccount = {
import { WorkspaceRole, WorkspaceStatus } from '@colanode/core';
export type AccountContext = {
id: string;
deviceId: string;
};
export type WorkspaceContext = {
id: string;
maxFileSize?: string | null;
status: WorkspaceStatus;
user: {
id: string;
accountId: string;
role: WorkspaceRole;
};
};
export type ClientType = 'web' | 'desktop';
export type ClientContext = {

View File

@@ -11,10 +11,10 @@ export const createWorkspacesTable: Migration = {
.addColumn('description', 'text')
.addColumn('avatar', 'text')
.addColumn('role', 'text', (col) => col.notNull())
.addColumn('storage_limit', 'integer', (col) => col.notNull())
.addColumn('max_file_size', 'integer', (col) => col.notNull())
.addColumn('max_file_size', 'integer')
.addColumn('created_at', 'text', (col) => col.notNull())
.addColumn('updated_at', 'text')
.addColumn('status', 'integer', (col) => col.notNull())
.execute();
},
down: async (db) => {

View File

@@ -1,7 +1,7 @@
import { ColumnType, Insertable, Selectable, Updateable } from 'kysely';
import { JobScheduleStatus, JobStatus } from '@colanode/client/jobs';
import { FileSubtype, WorkspaceRole } from '@colanode/core';
import { FileSubtype, WorkspaceRole, WorkspaceStatus } from '@colanode/core';
interface ServerTable {
domain: ColumnType<string, string, never>;
@@ -119,10 +119,10 @@ interface WorkspacesTable {
description: ColumnType<string | null, string | null, string | null>;
avatar: ColumnType<string | null, string | null, string | null>;
role: ColumnType<WorkspaceRole, WorkspaceRole, WorkspaceRole>;
storage_limit: ColumnType<string, string, string>;
max_file_size: ColumnType<string, string, string>;
max_file_size: ColumnType<string | null, string | null, string | null>;
created_at: ColumnType<string, string, string>;
updated_at: ColumnType<string | null, string | null, string | null>;
status: ColumnType<WorkspaceStatus, WorkspaceStatus, WorkspaceStatus>;
}
export type SelectWorkspace = Selectable<WorkspacesTable>;

View File

@@ -53,11 +53,11 @@ export abstract class AuthMutationHandlerBase {
user_id: workspace.user.id,
account_id: createdAccount.id,
role: workspace.user.role,
storage_limit: workspace.user.storageLimit,
max_file_size: workspace.user.maxFileSize,
max_file_size: workspace.maxFileSize ?? undefined,
avatar: workspace.avatar,
description: workspace.description,
created_at: new Date().toISOString(),
status: workspace.status,
})
.executeTakeFirst();

View File

@@ -1,6 +1,7 @@
import { MutationError, MutationErrorCode } from '@colanode/client/mutations';
import { AppService } from '@colanode/client/services/app-service';
import { WorkspaceService } from '@colanode/client/services/workspaces/workspace-service';
import { WorkspaceStatus } from '@colanode/core';
export abstract class WorkspaceMutationHandlerBase {
protected readonly app: AppService;
@@ -18,6 +19,13 @@ export abstract class WorkspaceMutationHandlerBase {
);
}
if (workspace.status === WorkspaceStatus.Readonly) {
throw new MutationError(
MutationErrorCode.WorkspaceReadonly,
'Workspace is in readonly mode and you cannot make any changes.'
);
}
return workspace;
}
}

View File

@@ -10,9 +10,7 @@ import {
import { AppService } from '@colanode/client/services/app-service';
import { WorkspaceCreateInput, WorkspaceOutput } from '@colanode/core';
export class WorkspaceCreateMutationHandler
implements MutationHandler<WorkspaceCreateMutationInput>
{
export class WorkspaceCreateMutationHandler implements MutationHandler<WorkspaceCreateMutationInput> {
private readonly app: AppService;
constructor(app: AppService) {
@@ -55,9 +53,9 @@ export class WorkspaceCreateMutationHandler
description: response.description,
avatar: response.avatar,
role: response.user.role,
storage_limit: response.user.storageLimit,
max_file_size: response.user.maxFileSize,
max_file_size: response.maxFileSize,
created_at: new Date().toISOString(),
status: response.status,
})
.onConflict((cb) => cb.doNothing())
.executeTakeFirst();

View File

@@ -33,8 +33,6 @@ import { ServerListQueryHandler } from './servers/server-list';
import { UserListQueryHandler } from './users/user-list';
import { UserSearchQueryHandler } from './users/user-search';
import { WorkspaceListQueryHandler } from './workspaces/workspace-list';
import { WorkspaceStorageGetQueryHandler } from './workspaces/workspace-storage-get';
import { WorkspaceStorageUsersGetQueryHandler } from './workspaces/workspace-storage-users-get';
export type QueryHandlerMap = {
[K in keyof QueryMap]: QueryHandler<QueryMap[K]['input']>;
@@ -50,10 +48,6 @@ export const buildQueryHandlerMap = (app: AppService): QueryHandlerMap => {
'record.field.value.count': new RecordFieldValueCountQueryHandler(app),
'user.search': new UserSearchQueryHandler(app),
'workspace.list': new WorkspaceListQueryHandler(app),
'workspace.storage.get': new WorkspaceStorageGetQueryHandler(app),
'workspace.storage.users.get': new WorkspaceStorageUsersGetQueryHandler(
app
),
'user.list': new UserListQueryHandler(app),
'emoji.list': new EmojiListQueryHandler(app),
'emoji.get': new EmojiGetQueryHandler(app),

View File

@@ -1,39 +0,0 @@
import { WorkspaceQueryHandlerBase } from '@colanode/client/handlers/queries/workspace-query-handler-base';
import { parseApiError } from '@colanode/client/lib/ky';
import { ChangeCheckResult, QueryHandler } from '@colanode/client/lib/types';
import { QueryError, QueryErrorCode } from '@colanode/client/queries';
import { WorkspaceStorageGetQueryInput } from '@colanode/client/queries/workspaces/workspace-storage-get';
import { Event } from '@colanode/client/types/events';
import { WorkspaceStorageGetOutput } from '@colanode/core';
export class WorkspaceStorageGetQueryHandler
extends WorkspaceQueryHandlerBase
implements QueryHandler<WorkspaceStorageGetQueryInput>
{
async handleQuery(
input: WorkspaceStorageGetQueryInput
): Promise<WorkspaceStorageGetOutput> {
const workspace = this.getWorkspace(input.userId);
try {
const output = await workspace.account.client
.get(`v1/workspaces/${workspace.workspaceId}/storage`)
.json<WorkspaceStorageGetOutput>();
return output;
} catch (error) {
const apiError = await parseApiError(error);
throw new QueryError(QueryErrorCode.ApiError, apiError.message);
}
}
async checkForChanges(
_event: Event,
_input: WorkspaceStorageGetQueryInput,
_output: WorkspaceStorageGetOutput
): Promise<ChangeCheckResult<WorkspaceStorageGetQueryInput>> {
return {
hasChanges: false,
};
}
}

View File

@@ -1,55 +0,0 @@
import { WorkspaceQueryHandlerBase } from '@colanode/client/handlers/queries/workspace-query-handler-base';
import { parseApiError } from '@colanode/client/lib/ky';
import { ChangeCheckResult, QueryHandler } from '@colanode/client/lib/types';
import { QueryError, QueryErrorCode } from '@colanode/client/queries';
import {
WorkspaceStorageUsersGetQueryInput,
WorkspaceStorageUsersGetQueryOutput,
} from '@colanode/client/queries/workspaces/workspace-storage-users-get';
import { Event } from '@colanode/client/types/events';
import { WorkspaceStorageUsersGetOutput } from '@colanode/core';
const DEFAULT_LIMIT = 100;
export class WorkspaceStorageUsersGetQueryHandler
extends WorkspaceQueryHandlerBase
implements QueryHandler<WorkspaceStorageUsersGetQueryInput>
{
async handleQuery(
input: WorkspaceStorageUsersGetQueryInput
): Promise<WorkspaceStorageUsersGetQueryOutput> {
const workspace = this.getWorkspace(input.userId);
const limit = input.limit ?? DEFAULT_LIMIT;
try {
const searchParams = new URLSearchParams({
limit: Math.max(1, Math.min(100, limit)).toString(),
});
if (input.cursor) {
searchParams.set('after', input.cursor);
}
const response = await workspace.account.client
.get(`v1/workspaces/${workspace.workspaceId}/users/storage`, {
searchParams,
})
.json<WorkspaceStorageUsersGetOutput>();
return response;
} catch (error) {
const apiError = await parseApiError(error);
throw new QueryError(QueryErrorCode.ApiError, apiError.message);
}
}
async checkForChanges(
_event: Event,
_input: WorkspaceStorageUsersGetQueryInput,
_output: WorkspaceStorageUsersGetQueryOutput
): Promise<ChangeCheckResult<WorkspaceStorageUsersGetQueryInput>> {
return {
hasChanges: false,
};
}
}

View File

@@ -150,8 +150,8 @@ export const mapWorkspace = (row: SelectWorkspace): Workspace => {
role: row.role,
avatar: row.avatar,
description: row.description,
maxFileSize: row.max_file_size.toString(),
storageLimit: row.storage_limit.toString(),
maxFileSize: row.max_file_size?.toString() ?? undefined,
status: row.status,
};
};

View File

@@ -1,4 +1,4 @@
import { Kysely, sql, Transaction } from 'kysely';
import { Kysely, Transaction } from 'kysely';
import { WorkspaceDatabaseSchema } from '@colanode/client/databases/workspace';
import { mapNode } from '@colanode/client/lib/mappers';
@@ -49,24 +49,6 @@ export const fetchNode = async (
return node ? mapNode(node) : undefined;
};
export const fetchUserStorageUsed = async (
database:
| Kysely<WorkspaceDatabaseSchema>
| Transaction<WorkspaceDatabaseSchema>,
userId: string
): Promise<bigint> => {
const storageUsedRow = await database
.selectFrom('nodes')
.select(({ fn }) => [
fn.sum(sql`json_extract(attributes, '$.size')`).as('storage_used'),
])
.where('type', '=', 'file')
.where('created_by', '=', userId)
.executeTakeFirst();
return BigInt(storageUsedRow?.storage_used ?? 0);
};
export const deleteNodeRelations = async (
database:
| Kysely<WorkspaceDatabaseSchema>

View File

@@ -81,6 +81,7 @@ export enum MutationErrorCode {
EmailVerificationFailed = 'email_verification_failed',
ServerNotFound = 'server_not_found',
WorkspaceNotFound = 'workspace_not_found',
WorkspaceReadonly = 'workspace_readonly',
WorkspaceNotCreated = 'workspace_not_created',
WorkspaceNotUpdated = 'workspace_not_updated',
SpaceNotFound = 'space_not_found',

View File

@@ -21,8 +21,6 @@ export * from './records/record-search';
export * from './users/user-list';
export * from './users/user-search';
export * from './workspaces/workspace-list';
export * from './workspaces/workspace-storage-get';
export * from './workspaces/workspace-storage-users-get';
export * from './avatars/avatar-get';
export * from './records/record-field-value-count';
export * from './files/upload-list';

View File

@@ -1,15 +0,0 @@
import { WorkspaceStorageGetOutput } from '@colanode/core';
export type WorkspaceStorageGetQueryInput = {
type: 'workspace.storage.get';
userId: string;
};
declare module '@colanode/client/queries' {
interface QueryMap {
'workspace.storage.get': {
input: WorkspaceStorageGetQueryInput;
output: WorkspaceStorageGetOutput;
};
}
}

View File

@@ -1,21 +0,0 @@
import { WorkspaceStorageUser } from '@colanode/core';
export type WorkspaceStorageUsersGetQueryInput = {
type: 'workspace.storage.users.get';
userId: string;
limit: number;
cursor?: string;
};
export type WorkspaceStorageUsersGetQueryOutput = {
users: WorkspaceStorageUser[];
};
declare module '@colanode/client/queries' {
interface QueryMap {
'workspace.storage.users.get': {
input: WorkspaceStorageUsersGetQueryInput;
output: WorkspaceStorageUsersGetQueryOutput;
};
}
}

View File

@@ -229,8 +229,6 @@ export class AccountService {
description: workspace.description,
avatar: workspace.avatar,
role: workspace.user.role,
storage_limit: workspace.user.storageLimit,
max_file_size: workspace.user.maxFileSize,
})
.where('workspace_id', '=', workspace.id)
.executeTakeFirst();
@@ -251,9 +249,9 @@ export class AccountService {
description: workspace.description,
avatar: workspace.avatar,
role: workspace.user.role,
storage_limit: workspace.user.storageLimit,
max_file_size: workspace.user.maxFileSize,
max_file_size: workspace.maxFileSize ?? undefined,
created_at: new Date().toISOString(),
status: workspace.status,
})
.executeTakeFirst();

View File

@@ -11,7 +11,7 @@ import {
mapNode,
mapUpload,
} from '@colanode/client/lib/mappers';
import { fetchNode, fetchUserStorageUsed } from '@colanode/client/lib/utils';
import { fetchNode } from '@colanode/client/lib/utils';
import { MutationError, MutationErrorCode } from '@colanode/client/mutations';
import { AppService } from '@colanode/client/services/app-service';
import { WorkspaceService } from '@colanode/client/services/workspaces/workspace-service';
@@ -82,31 +82,16 @@ export class FileService {
}
const fileSize = BigInt(tempFile.size);
const maxFileSize = BigInt(this.workspace.maxFileSize);
if (fileSize > maxFileSize) {
throw new MutationError(
MutationErrorCode.FileTooLarge,
'The file you are trying to upload is too large. The maximum file size is ' +
formatBytes(maxFileSize)
);
}
const storageUsed = await fetchUserStorageUsed(
this.workspace.database,
this.workspace.userId
);
const storageLimit = BigInt(this.workspace.storageLimit);
if (storageUsed + fileSize > storageLimit) {
throw new MutationError(
MutationErrorCode.StorageLimitExceeded,
'You have reached your storage limit. You have used ' +
formatBytes(storageUsed) +
' and you are trying to upload a file of size ' +
formatBytes(fileSize) +
'. Your storage limit is ' +
formatBytes(storageLimit)
);
if (this.workspace.maxFileSize) {
const maxFileSize = BigInt(this.workspace.maxFileSize);
if (fileSize > maxFileSize) {
throw new MutationError(
MutationErrorCode.FileTooLarge,
'The file you are trying to upload is too large. The maximum file size is ' +
formatBytes(maxFileSize)
);
}
}
const node = await fetchNode(this.workspace.database, parentId);

View File

@@ -19,7 +19,7 @@ import { RadarService } from '@colanode/client/services/workspaces/radar-service
import { SyncService } from '@colanode/client/services/workspaces/sync-service';
import { UserService } from '@colanode/client/services/workspaces/user-service';
import { Workspace } from '@colanode/client/types/workspaces';
import { createDebugger, WorkspaceRole } from '@colanode/core';
import { createDebugger, WorkspaceRole, WorkspaceStatus } from '@colanode/core';
const debug = createDebugger('desktop:service:workspace');
@@ -83,12 +83,12 @@ export class WorkspaceService {
return this.workspace.role;
}
public get maxFileSize(): string {
public get maxFileSize(): string | null | undefined {
return this.workspace.maxFileSize;
}
public get storageLimit(): string {
return this.workspace.storageLimit;
public get status(): WorkspaceStatus {
return this.workspace.status;
}
public updateWorkspace(workspace: Workspace): void {

View File

@@ -1,4 +1,4 @@
import { WorkspaceRole } from '@colanode/core';
import { WorkspaceRole, WorkspaceStatus } from '@colanode/core';
export type Workspace = {
userId: string;
@@ -8,8 +8,8 @@ export type Workspace = {
avatar?: string | null;
accountId: string;
role: WorkspaceRole;
maxFileSize: string;
storageLimit: string;
maxFileSize?: string | null;
status: WorkspaceStatus;
};
export type SidebarMenuType = 'chats' | 'spaces' | 'settings';

View File

@@ -37,5 +37,4 @@ export * from './types/mentions';
export * from './types/avatars';
export * from './types/build';
export * from './lib/servers';
export * from './types/storage';
export * from './types/auth';

View File

@@ -7,6 +7,7 @@ export enum ApiHeader {
}
export enum ApiErrorCode {
WorkspaceReadonly = 'workspace_readonly',
AccountNotFound = 'account_not_found',
DeviceNotFound = 'device_not_found',
AccountMismatch = 'account_mismatch',

View File

@@ -1,50 +0,0 @@
import { z } from 'zod/v4';
export const workspaceStorageCounterSchema = z.object({
count: z.string(),
size: z.string(),
});
export type WorkspaceStorageCounter = z.infer<
typeof workspaceStorageCounterSchema
>;
export const workspaceStorageUsageSchema = z.object({
uploads: workspaceStorageCounterSchema,
nodes: workspaceStorageCounterSchema,
documents: workspaceStorageCounterSchema,
});
export type WorkspaceStorageUsage = z.infer<typeof workspaceStorageUsageSchema>;
export const workspaceStorageUserSchema = z.object({
id: z.string(),
storageLimit: z.string(),
maxFileSize: z.string(),
usage: workspaceStorageUsageSchema,
});
export type WorkspaceStorageUser = z.infer<typeof workspaceStorageUserSchema>;
export const workspaceStorageUsersGetOutputSchema = z.object({
users: z.array(workspaceStorageUserSchema),
});
export type WorkspaceStorageUsersGetOutput = z.infer<
typeof workspaceStorageUsersGetOutputSchema
>;
const workspaceUsageSchema = z.object({
storageLimit: z.string().nullable().optional(),
maxFileSize: z.string().nullable().optional(),
usage: workspaceStorageUsageSchema,
});
export const workspaceStorageGetOutputSchema = z.object({
user: workspaceStorageUserSchema,
workspace: workspaceUsageSchema.optional(),
});
export type WorkspaceStorageGetOutput = z.infer<
typeof workspaceStorageGetOutputSchema
>;

View File

@@ -3,6 +3,7 @@ import { z } from 'zod/v4';
export enum WorkspaceStatus {
Active = 1,
Inactive = 2,
Readonly = 3,
}
export enum UserStatus {
@@ -32,8 +33,6 @@ export const workspaceUserOutputSchema = z.object({
id: z.string(),
accountId: z.string(),
role: workspaceRoleSchema,
storageLimit: z.string(),
maxFileSize: z.string(),
});
export type WorkspaceUserOutput = z.infer<typeof workspaceUserOutputSchema>;
@@ -44,6 +43,8 @@ export const workspaceOutputSchema = z.object({
description: z.string().nullable().optional(),
avatar: z.string().nullable().optional(),
user: workspaceUserOutputSchema,
status: z.enum(WorkspaceStatus),
maxFileSize: z.string().optional(),
});
export type WorkspaceOutput = z.infer<typeof workspaceOutputSchema>;

View File

@@ -1,6 +1,5 @@
import { count, inArray, useLiveQuery } from '@tanstack/react-db';
import {
Cylinder,
Download,
LogOut,
Palette,
@@ -63,15 +62,6 @@ export const SidebarSettings = () => {
/>
)}
</Link>
<Link from="/workspace/$userId" to="storage">
{({ isActive }) => (
<SidebarSettingsItem
title="Storage"
icon={Cylinder}
isActive={isActive}
/>
)}
</Link>
<Link from="/workspace/$userId" to="uploads">
{({ isActive }) => (
<SidebarSettingsItem

View File

@@ -1,129 +0,0 @@
import { Info } from 'lucide-react';
import { formatBytes, WorkspaceStorageUsage } from '@colanode/core';
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from '@colanode/ui/components/ui/tooltip';
import { bigintToPercent } from '@colanode/ui/lib/utils';
interface StorageStatsProps {
storageLimit: string | null | undefined;
usage: WorkspaceStorageUsage;
}
const numberFormatter = new Intl.NumberFormat();
const formatCount = (value: bigint) => {
const asNumber = Number(value);
if (Number.isSafeInteger(asNumber)) {
return numberFormatter.format(asNumber);
}
return value.toString();
};
export const StorageStats = ({ storageLimit, usage }: StorageStatsProps) => {
const uploadsUsed = BigInt(usage.uploads.size ?? '0');
const limit = storageLimit ? BigInt(storageLimit) : null;
const usedPercentage = limit ? bigintToPercent(limit, uploadsUsed) : 0;
const clampedUsedPercentage = Math.min(usedPercentage, 100);
const remaining = limit
? limit > uploadsUsed
? limit - uploadsUsed
: BigInt(0)
: null;
const filesSize = BigInt(usage.uploads.size ?? '0');
const filesCount = BigInt(usage.uploads.count ?? '0');
const nodesSize = BigInt(usage.nodes.size ?? '0');
const nodesCount = BigInt(usage.nodes.count ?? '0');
const documentsSize = BigInt(usage.documents.size ?? '0');
const contentSize = nodesSize + documentsSize;
const contentCount = nodesCount;
return (
<div className="grid gap-4 md:grid-cols-2 md:items-stretch">
<div className="rounded-xl border p-6 shadow-sm">
<p className="text-sm font-medium text-muted-foreground">
Total storage
</p>
<div className="mt-4 flex flex-col gap-1">
<span className="text-3xl font-semibold tracking-tight">
{formatBytes(uploadsUsed)}
</span>
<span className="text-sm text-muted-foreground">
of {limit ? formatBytes(limit) : 'Unlimited'}
</span>
</div>
<div className="mt-6 h-3 w-full overflow-hidden rounded-full bg-secondary">
<div
className="h-full rounded-full bg-primary transition-all"
style={{ width: `${clampedUsedPercentage}%` }}
/>
</div>
<div className="mt-3 flex items-center justify-between text-xs text-muted-foreground">
<span>
{clampedUsedPercentage.toFixed(1)}%{' '}
{limit ? 'of your limit' : 'used'}
</span>
{remaining !== null ? (
<span>{formatBytes(remaining)} remaining</span>
) : (
<span>No limit set</span>
)}
</div>
</div>
<div className="flex flex-col gap-4">
{[
{
label: 'Content',
size: formatBytes(contentSize),
count: formatCount(contentCount),
info: 'Messages, documents, databases and other items created directly in Colanode.',
},
{
label: 'Files',
size: formatBytes(filesSize),
count: formatCount(filesCount),
info: 'Includes every file uploaded to your workspace.',
},
].map((card) => (
<div
key={card.label}
className="flex flex-1 flex-col justify-center rounded-xl border p-4 shadow-sm"
>
<div className="flex items-center gap-2">
<p className="text-xs text-muted-foreground">{card.label}</p>
{card.info ? (
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
aria-label={`More info about ${card.label}`}
className="text-muted-foreground transition hover:text-foreground"
>
<Info className="h-3.5 w-3.5" />
</button>
</TooltipTrigger>
<TooltipContent>
<p>{card.info}</p>
</TooltipContent>
</Tooltip>
) : null}
</div>
<div className="mt-2 flex items-end justify-between">
<span className="text-2xl font-semibold">{card.size}</span>
<span className="text-xs text-muted-foreground">
{card.count} items
</span>
</div>
</div>
))}
</div>
</div>
);
};

View File

@@ -1,8 +0,0 @@
import { BreadcrumbItem } from '@colanode/ui/components/layouts/containers/breadcrumb-item';
import { defaultIcons } from '@colanode/ui/lib/assets';
export const WorkspaceStorageBreadcrumb = () => {
return (
<BreadcrumbItem id="storage" avatar={defaultIcons.storage} name="Storage" />
);
};

View File

@@ -1,38 +0,0 @@
import { Cloud, ExternalLink } from 'lucide-react';
import { isColanodeDomain } from '@colanode/core';
import { Button } from '@colanode/ui/components/ui/button';
import { useServer } from '@colanode/ui/contexts/server';
const CLOUD_URL = 'https://cloud.colanode.com';
export const WorkspaceStorageCloud = () => {
const server = useServer();
if (!isColanodeDomain(server.domain)) {
return null;
}
return (
<div className="w-full flex flex-row items-center rounded-xl border p-6 shadow-sm gap-4">
<Cloud className="size-6" />
<div className="flex-1">
<h3 className="text-lg font-semibold tracking-tight">
Upgrade your Colanode Cloud plan
</h3>
<p className="text-sm text-muted-foreground">
Get more storage and higher limits for your workspace.
</p>
</div>
<Button
variant="default"
size="sm"
onClick={() => {
window.colanode.openExternalUrl(CLOUD_URL);
}}
>
<ExternalLink className="size-4" />
<span>Manage Plan</span>
</Button>
</div>
);
};

View File

@@ -1,15 +0,0 @@
import { Container } from '@colanode/ui/components/layouts/containers/container';
import { WorkspaceStorageBreadcrumb } from '@colanode/ui/components/workspaces/storage/workspace-storage-breadcrumb';
import { WorkspaceStorageStats } from '@colanode/ui/components/workspaces/storage/workspace-storage-stats';
import { WorkspaceStorageUsers } from '@colanode/ui/components/workspaces/storage/workspace-storage-users';
export const WorkspaceStorageContainer = () => {
return (
<Container type="full" breadcrumb={<WorkspaceStorageBreadcrumb />}>
<div className="max-w-4xl space-y-10">
<WorkspaceStorageStats />
<WorkspaceStorageUsers />
</div>
</Container>
);
};

View File

@@ -1,100 +0,0 @@
import { BadgeAlert } from 'lucide-react';
import { Button } from '@colanode/ui/components/ui/button';
import { Spinner } from '@colanode/ui/components/ui/spinner';
import { StorageStats } from '@colanode/ui/components/workspaces/storage/storage-stats';
import { WorkspaceStorageCloud } from '@colanode/ui/components/workspaces/storage/workspace-storage-cloud';
import { useWorkspace } from '@colanode/ui/contexts/workspace';
import { useQuery } from '@colanode/ui/hooks/use-query';
export const WorkspaceStorageStats = () => {
const workspace = useWorkspace();
const canManageStorage =
workspace.role === 'owner' || workspace.role === 'admin';
const storageQuery = useQuery({
type: 'workspace.storage.get',
userId: workspace.userId,
});
const showUserError = storageQuery.isError || !storageQuery.data?.user;
const showWorkspaceError =
storageQuery.isError || !storageQuery.data?.workspace;
return (
<div className="max-w-4xl space-y-10">
<div className="space-y-4">
<div>
<h2 className="text-2xl font-semibold tracking-tight">My storage</h2>
<p className="mt-2 text-sm text-muted-foreground">
Your personal storage usage.
</p>
</div>
{storageQuery.isPending ? (
<div className="flex items-center gap-4 text-sm text-muted-foreground">
<Spinner className="size-5" />
<span>Loading storage data from the server...</span>
</div>
) : showUserError ? (
<div className="space-y-2 text-sm text-muted-foreground">
<p>Couldn't load your storage information. Please try again.</p>
<Button
variant="outline"
size="sm"
onClick={() => storageQuery.refetch()}
>
Try again
</Button>
</div>
) : storageQuery.data?.user?.usage ? (
<StorageStats
storageLimit={storageQuery.data.user.storageLimit}
usage={storageQuery.data.user.usage}
/>
) : null}
</div>
{canManageStorage && (
<div className="space-y-4">
<div>
<h2 className="text-2xl font-semibold tracking-tight">
Workspace storage
</h2>
<p className="mt-2 text-sm text-muted-foreground">
Total storage usage for the workspace.
</p>
</div>
{storageQuery.isPending ? (
<div className="flex items-center gap-4 text-sm text-muted-foreground">
<Spinner className="size-5" />
<span>Loading storage data from the server...</span>
</div>
) : showWorkspaceError ? (
<div className="flex flex-col gap-4 text-sm text-muted-foreground">
<div className="flex items-center gap-4">
<BadgeAlert className="size-8 text-red-400" />
<span>
Couldn't load workspace storage information. Please try again.
</span>
</div>
<Button
variant="outline"
size="sm"
onClick={() => storageQuery.refetch()}
>
Retry
</Button>
</div>
) : storageQuery.data?.workspace?.usage ? (
<>
<StorageStats
storageLimit={storageQuery.data.workspace.storageLimit ?? null}
usage={storageQuery.data.workspace.usage}
/>
<WorkspaceStorageCloud />
</>
) : null}
</div>
)}
</div>
);
};

View File

@@ -1,6 +0,0 @@
import { TabItem } from '@colanode/ui/components/layouts/tabs/tab-item';
import { defaultIcons } from '@colanode/ui/lib/assets';
export const WorkspaceStorageTab = () => {
return <TabItem id="storage" avatar={defaultIcons.storage} name="Storage" />;
};

View File

@@ -1,137 +0,0 @@
import { eq, useLiveQuery } from '@tanstack/react-db';
import { Settings } from 'lucide-react';
import { useState } from 'react';
import { formatBytes, WorkspaceStorageUser } from '@colanode/core';
import { Avatar } from '@colanode/ui/components/avatars/avatar';
import { Button } from '@colanode/ui/components/ui/button';
import { TableCell, TableRow } from '@colanode/ui/components/ui/table';
import { WorkspaceStorageUserUpdateDialog } from '@colanode/ui/components/workspaces/storage/workspace-storage-user-update-dialog';
import { useWorkspace } from '@colanode/ui/contexts/workspace';
import { bigintToPercent, cn } from '@colanode/ui/lib/utils';
interface UserStorageProgressBarProps {
storageUsed: string;
storageLimit: string;
}
const UserStorageProgressBar = ({
storageUsed,
storageLimit,
}: UserStorageProgressBarProps) => {
const percentage = bigintToPercent(BigInt(storageLimit), BigInt(storageUsed));
const getBarColor = () => {
if (percentage >= 90) return 'bg-red-500';
if (percentage >= 70) return 'bg-orange-500';
return 'bg-green-500';
};
return (
<div className="space-y-1">
<div className="flex items-center justify-between text-sm">
<span className="font-medium">{formatBytes(BigInt(storageUsed))}</span>
<span className="text-muted-foreground">
({percentage.toFixed(1)}%)
</span>
</div>
<div className="w-full h-2 bg-secondary rounded-full overflow-hidden">
<div
className={cn('h-full transition-all duration-300', getBarColor())}
style={{ width: `${Math.min(percentage, 100)}%` }}
/>
</div>
</div>
);
};
interface WorkspaceStorageUserRowProps {
user: WorkspaceStorageUser;
onUpdate: () => void;
}
export const WorkspaceStorageUserRow = ({
user,
onUpdate,
}: WorkspaceStorageUserRowProps) => {
const workspace = useWorkspace();
const [openUpdateDialog, setOpenUpdateDialog] = useState(false);
const userQuery = useLiveQuery(
(q) =>
q
.from({ users: workspace.collections.users })
.where(({ users }) => eq(users.id, user.id))
.select(({ users }) => ({
id: users.id,
name: users.name,
avatar: users.avatar,
email: users.email,
}))
.findOne(),
[workspace.userId, user.id]
);
const name = userQuery.data?.name ?? 'Unknown';
const email = userQuery.data?.email ?? '';
const avatar = userQuery.data?.avatar ?? null;
const storageLimitBytes = BigInt(user.storageLimit);
const maxFileSizeBytes = user.maxFileSize ? BigInt(user.maxFileSize) : null;
const storageUsed = user.usage.uploads.size ?? '0';
return (
<>
<TableRow>
<TableCell>
<div className="flex items-center space-x-3">
<Avatar id={user.id} name={name} avatar={avatar} />
<div className="grow min-w-0">
<p className="text-sm font-medium leading-none truncate">
{name}
</p>
<p className="text-sm text-muted-foreground truncate">{email}</p>
</div>
</div>
</TableCell>
<TableCell className="text-center">
<span className="text-sm font-medium">
{maxFileSizeBytes ? formatBytes(maxFileSizeBytes) : '#'}
</span>
</TableCell>
<TableCell className="text-center">
<span className="text-sm font-medium">
{formatBytes(storageLimitBytes)}
</span>
</TableCell>
<TableCell className="min-w-[200px] text-center">
<UserStorageProgressBar
storageLimit={user.storageLimit}
storageUsed={storageUsed}
/>
</TableCell>
<TableCell className="w-10 text-right">
<Button
variant="ghost"
size="icon"
className="h-8 w-8"
onClick={() => setOpenUpdateDialog(true)}
>
<Settings className="h-4 w-4" />
</Button>
</TableCell>
</TableRow>
{openUpdateDialog && (
<WorkspaceStorageUserUpdateDialog
user={user}
open={openUpdateDialog}
onOpenChange={setOpenUpdateDialog}
onUpdate={() => {
onUpdate();
setOpenUpdateDialog(false);
}}
/>
)}
</>
);
};

View File

@@ -1,42 +0,0 @@
import { WorkspaceStorageUser } from '@colanode/core';
import {
Table,
TableBody,
TableHead,
TableHeader,
TableRow,
} from '@colanode/ui/components/ui/table';
import { WorkspaceStorageUserRow } from '@colanode/ui/components/workspaces/storage/workspace-storage-user-row';
interface WorkspaceStorageUserTableProps {
users: WorkspaceStorageUser[];
onUsersUpdated: () => void;
}
export const WorkspaceStorageUserTable = ({
users,
onUsersUpdated,
}: WorkspaceStorageUserTableProps) => {
return (
<Table>
<TableHeader>
<TableRow>
<TableHead>User</TableHead>
<TableHead className="text-center">File Size Limit</TableHead>
<TableHead className="text-center">Total Storage</TableHead>
<TableHead className="text-center">Used Storage</TableHead>
<TableHead className="w-10 text-right"></TableHead>
</TableRow>
</TableHeader>
<TableBody>
{users.map((user) => (
<WorkspaceStorageUserRow
key={user.id}
user={user}
onUpdate={onUsersUpdated}
/>
))}
</TableBody>
</Table>
);
};

View File

@@ -1,355 +0,0 @@
import { eq, useLiveQuery } from '@tanstack/react-db';
import { useForm } from '@tanstack/react-form';
import { Check, ChevronDown } from 'lucide-react';
import { useState } from 'react';
import { toast } from 'sonner';
import { z } from 'zod/v4';
import { WorkspaceStorageUser } from '@colanode/core';
import { Avatar } from '@colanode/ui/components/avatars/avatar';
import { Button } from '@colanode/ui/components/ui/button';
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from '@colanode/ui/components/ui/dialog';
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from '@colanode/ui/components/ui/dropdown-menu';
import {
Field,
FieldError,
FieldGroup,
FieldLabel,
} from '@colanode/ui/components/ui/field';
import { Input } from '@colanode/ui/components/ui/input';
import { Spinner } from '@colanode/ui/components/ui/spinner';
import { useWorkspace } from '@colanode/ui/contexts/workspace';
import { useMutation } from '@colanode/ui/hooks/use-mutation';
const UNITS = [
{ label: 'TB', value: 'TB', bytes: 1024 ** 4 },
{ label: 'GB', value: 'GB', bytes: 1024 ** 3 },
{ label: 'MB', value: 'MB', bytes: 1024 ** 2 },
{ label: 'KB', value: 'KB', bytes: 1024 },
{ label: 'Bytes', value: 'bytes', bytes: 1 },
];
const convertBytesToUnit = (bytes: string) => {
const bytesNum = parseInt(bytes);
if (isNaN(bytesNum) || bytesNum === 0) {
return { value: '0', unit: 'bytes' };
}
for (const unit of UNITS) {
if (bytesNum >= unit.bytes || unit.value === 'bytes') {
const value = bytesNum / unit.bytes;
return {
value: value % 1 === 0 ? value.toString() : value.toFixed(2),
unit: unit.value,
};
}
}
return { value: '0', unit: 'bytes' };
};
const convertUnitToBytes = (value: string, unit: string): string => {
const unitData = UNITS.find((u) => u.value === unit);
const selectedUnit = unitData || UNITS[UNITS.length - 1]!;
const numValue = parseFloat(value || '0');
return Math.round(numValue * selectedUnit.bytes).toString();
};
const formatBytes = (bytes: string): string => {
const num = parseInt(bytes);
if (isNaN(num)) return '0';
return new Intl.NumberFormat().format(num);
};
const formSchema = z.object({
storageLimit: z.string().min(1, 'Storage limit is required'),
maxFileSize: z.string().min(1, 'Max file size is required'),
});
interface WorkspaceStorageUserUpdateDialogProps {
user: WorkspaceStorageUser;
open: boolean;
onOpenChange: (open: boolean) => void;
onUpdate: () => void;
}
export const WorkspaceStorageUserUpdateDialog = ({
user,
open,
onOpenChange,
onUpdate,
}: WorkspaceStorageUserUpdateDialogProps) => {
const workspace = useWorkspace();
const { mutate, isPending } = useMutation();
const initialStorageLimit = convertBytesToUnit(user.storageLimit);
const initialMaxFileSize = convertBytesToUnit(user.maxFileSize);
const [storageLimitUnit, setStorageLimitUnit] = useState(
initialStorageLimit.unit
);
const [maxFileSizeUnit, setMaxFileSizeUnit] = useState(
initialMaxFileSize.unit
);
const userQuery = useLiveQuery(
(q) =>
q
.from({ users: workspace.collections.users })
.where(({ users }) => eq(users.id, user.id))
.select(({ users }) => ({
id: users.id,
name: users.name,
avatar: users.avatar,
email: users.email,
}))
.findOne(),
[workspace.userId, user.id]
);
const name = userQuery.data?.name ?? 'Unknown';
const email = userQuery.data?.email ?? '';
const avatar = userQuery.data?.avatar ?? null;
const form = useForm({
defaultValues: {
storageLimit: initialStorageLimit.value,
maxFileSize: initialMaxFileSize.value,
},
validators: {
onSubmit: formSchema,
},
onSubmit: async ({ value }) => {
if (isPending) {
return;
}
const apiValues = {
storageLimit: convertUnitToBytes(value.storageLimit, storageLimitUnit),
maxFileSize: convertUnitToBytes(value.maxFileSize, maxFileSizeUnit),
};
if (BigInt(apiValues.maxFileSize) > BigInt(apiValues.storageLimit)) {
toast.error('Max file size cannot be larger than storage limit');
return;
}
mutate({
input: {
type: 'user.storage.update',
accountId: workspace.accountId,
workspaceId: workspace.workspaceId,
userId: user.id,
storageLimit: apiValues.storageLimit,
maxFileSize: apiValues.maxFileSize,
},
onSuccess: () => {
toast.success('User storage settings updated');
form.reset();
onUpdate();
},
onError: (error) => {
toast.error(error.message);
},
});
},
});
const handleCancel = () => {
form.reset();
setStorageLimitUnit(initialStorageLimit.unit);
setMaxFileSizeUnit(initialMaxFileSize.unit);
onOpenChange(false);
};
const storageLimitUnitData = UNITS.find((u) => u.value === storageLimitUnit);
const storageLimitUnitLabel = storageLimitUnitData?.label ?? 'bytes';
const maxFileSizeUnitData = UNITS.find((u) => u.value === maxFileSizeUnit);
const maxFileSizeUnitLabel = maxFileSizeUnitData?.label ?? 'bytes';
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent>
<DialogHeader>
<DialogTitle>Update storage settings</DialogTitle>
<DialogDescription>
Update the storage limits for this user
</DialogDescription>
</DialogHeader>
<div className="flex items-center space-x-3 py-4 border-b">
<Avatar id={user.id} name={name} avatar={avatar} />
<div className="grow min-w-0">
<p className="text-sm font-medium leading-none truncate">{name}</p>
<p className="text-sm text-muted-foreground truncate">{email}</p>
</div>
</div>
<form
className="flex flex-col"
onSubmit={(e) => {
e.preventDefault();
form.handleSubmit();
}}
>
<div className="grow space-y-6 py-2 pb-4">
<FieldGroup>
<form.Field
name="storageLimit"
children={(field) => {
const isInvalid =
field.state.meta.isTouched && !field.state.meta.isValid;
return (
<Field data-invalid={isInvalid}>
<FieldLabel htmlFor={field.name}>
Storage Limit
</FieldLabel>
<div className="flex space-x-2">
<Input
id={field.name}
name={field.name}
type="number"
placeholder="5"
value={field.state.value}
onBlur={field.handleBlur}
onChange={(e) => field.handleChange(e.target.value)}
aria-invalid={isInvalid}
className="flex-1"
min="1"
step="1"
/>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
className="w-20 justify-between"
>
{storageLimitUnitLabel}
<ChevronDown className="h-4 w-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
{UNITS.map((unit) => (
<DropdownMenuItem
key={unit.value}
onClick={() => setStorageLimitUnit(unit.value)}
className="flex items-center justify-between"
>
<span>{unit.label}</span>
{storageLimitUnit === unit.value && (
<Check className="h-4 w-4" />
)}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
</div>
<div className="text-xs text-muted-foreground">
={' '}
{formatBytes(
convertUnitToBytes(
field.state.value || '0',
storageLimitUnit
)
)}{' '}
bytes
</div>
{isInvalid && (
<FieldError errors={field.state.meta.errors} />
)}
</Field>
);
}}
/>
<form.Field
name="maxFileSize"
children={(field) => {
const isInvalid =
field.state.meta.isTouched && !field.state.meta.isValid;
return (
<Field data-invalid={isInvalid}>
<FieldLabel htmlFor={field.name}>
Max File Size
</FieldLabel>
<div className="flex space-x-2">
<Input
id={field.name}
name={field.name}
type="number"
placeholder="10"
value={field.state.value || ''}
onBlur={field.handleBlur}
onChange={(e) => field.handleChange(e.target.value)}
aria-invalid={isInvalid}
className="flex-1"
min="1"
step="1"
/>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="outline"
className="w-20 justify-between"
>
{maxFileSizeUnitLabel}
<ChevronDown className="h-4 w-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
{UNITS.map((unit) => (
<DropdownMenuItem
key={unit.value}
onClick={() => setMaxFileSizeUnit(unit.value)}
className="flex items-center justify-between"
>
<span>{unit.label}</span>
{maxFileSizeUnit === unit.value && (
<Check className="h-4 w-4" />
)}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
</div>
<div className="text-xs text-muted-foreground">
={' '}
{formatBytes(
convertUnitToBytes(
field.state.value || '0',
maxFileSizeUnit
)
)}{' '}
bytes
</div>
{isInvalid && (
<FieldError errors={field.state.meta.errors} />
)}
</Field>
);
}}
/>
</FieldGroup>
</div>
<DialogFooter>
<Button type="button" variant="outline" onClick={handleCancel}>
Cancel
</Button>
<Button type="submit" disabled={isPending}>
{isPending && <Spinner className="mr-1" />}
Update
</Button>
</DialogFooter>
</form>
</DialogContent>
</Dialog>
);
};

View File

@@ -1,107 +0,0 @@
import { useInfiniteQuery } from '@tanstack/react-query';
import { BadgeAlert } from 'lucide-react';
import { InView } from 'react-intersection-observer';
import { Button } from '@colanode/ui/components/ui/button';
import { Separator } from '@colanode/ui/components/ui/separator';
import { Spinner } from '@colanode/ui/components/ui/spinner';
import { WorkspaceStorageUserTable } from '@colanode/ui/components/workspaces/storage/workspace-storage-user-table';
import { useWorkspace } from '@colanode/ui/contexts/workspace';
const USERS_PER_PAGE = 100;
export const WorkspaceStorageUsers = () => {
const workspace = useWorkspace();
const canManageStorage =
workspace.role === 'owner' || workspace.role === 'admin';
const usersQuery = useInfiniteQuery({
queryKey: ['workspace-storage-users', workspace.userId],
initialPageParam: undefined as string | undefined,
queryFn: ({ pageParam }) =>
window.colanode.executeQuery({
type: 'workspace.storage.users.get',
userId: workspace.userId,
limit: USERS_PER_PAGE,
cursor: pageParam,
}),
getNextPageParam: (lastPage) => {
if (lastPage.users.length < USERS_PER_PAGE) {
return undefined;
}
const lastUser = lastPage.users[lastPage.users.length - 1];
return lastUser?.id;
},
});
const users = usersQuery.data?.pages.flatMap((page) => page.users) ?? [];
if (!canManageStorage) {
return null;
}
const handleUsersUpdated = () => {
usersQuery.refetch();
};
return (
<div className="space-y-6">
<div>
<h2 className="text-2xl font-semibold tracking-tight">Users</h2>
<p className="mt-2 text-sm text-muted-foreground">
View and manage storage usage for each workspace user.
</p>
<Separator className="mt-3" />
</div>
{usersQuery.isPending && users.length === 0 ? (
<div className="flex items-center gap-4 text-sm text-muted-foreground">
<Spinner className="size-5" />
<span>Loading user storage information...</span>
</div>
) : usersQuery.isError && users.length === 0 ? (
<div className="flex flex-col gap-4 text-sm text-muted-foreground">
<div className="flex items-center gap-4">
<BadgeAlert className="size-8 text-red-400" />
<span>
Couldn't load workspace user storage details. Please try again.
</span>
</div>
<Button
variant="outline"
size="sm"
onClick={() => usersQuery.refetch()}
>
Retry
</Button>
</div>
) : users.length === 0 ? (
<div className="text-sm text-muted-foreground">No users found.</div>
) : (
<>
<WorkspaceStorageUserTable
users={users}
onUsersUpdated={handleUsersUpdated}
/>
<div className="flex items-center justify-center py-4">
{usersQuery.isFetchingNextPage && <Spinner className="size-4" />}
</div>
{usersQuery.hasNextPage && (
<InView
rootMargin="200px"
onChange={(inView) => {
if (
inView &&
usersQuery.hasNextPage &&
!usersQuery.isFetchingNextPage
) {
usersQuery.fetchNextPage();
}
}}
/>
)}
</>
)}
</div>
);
};

View File

@@ -0,0 +1,51 @@
import { ExternalLink } from 'lucide-react';
import { isColanodeDomain } from '@colanode/core';
import { Button } from '@colanode/ui/components/ui/button';
import { Separator } from '@colanode/ui/components/ui/separator';
import { useServer } from '@colanode/ui/contexts/server';
import { useWorkspace } from '@colanode/ui/contexts/workspace';
const CLOUD_URL = 'https://cloud.colanode.com';
export const WorkspaceCloud = () => {
const server = useServer();
const workspace = useWorkspace();
if (workspace.role !== 'owner') {
return null;
}
if (!isColanodeDomain(server.domain)) {
return null;
}
return (
<div className="space-y-6">
<div>
<h2 className="text-2xl font-semibold tracking-tight">Cloud Plan</h2>
<Separator className="mt-3" />
</div>
<div className="w-full flex flex-row items-center gap-4">
<div className="flex-1 space-y-1">
<h3 className="font-semibold">Upgrade your Colanode Cloud plan</h3>
<p className="text-sm text-muted-foreground">
Get more storage and higher limits for your workspace.
</p>
</div>
<div className="shrink-0">
<Button
variant="default"
size="sm"
onClick={() => {
window.colanode.openExternalUrl(CLOUD_URL);
}}
>
<ExternalLink className="size-4" />
<span>Manage Plan</span>
</Button>
</div>
</div>
</div>
);
};

View File

@@ -24,7 +24,7 @@ export const WorkspaceDelete = () => {
return (
<>
<div className="flex items-center justify-between gap-6">
<div className="flex-1 space-y-2">
<div className="flex-1 space-y-1">
<h3 className="font-semibold">Delete workspace</h3>
<p className="text-sm text-muted-foreground">
Once you delete a workspace, there is no going back. Please be

View File

@@ -4,6 +4,7 @@ import { toast } from 'sonner';
import { collections } from '@colanode/ui/collections';
import { Container } from '@colanode/ui/components/layouts/containers/container';
import { Separator } from '@colanode/ui/components/ui/separator';
import { WorkspaceCloud } from '@colanode/ui/components/workspaces/workspace-cloud';
import { WorkspaceDelete } from '@colanode/ui/components/workspaces/workspace-delete';
import { WorkspaceForm } from '@colanode/ui/components/workspaces/workspace-form';
import { WorkspaceNotFound } from '@colanode/ui/components/workspaces/workspace-not-found';
@@ -75,6 +76,8 @@ export const WorkspaceSettingsContainer = () => {
/>
</div>
<WorkspaceCloud />
<div className="space-y-6">
<div>
<h2 className="text-2xl font-semibold tracking-tight">

View File

@@ -41,10 +41,6 @@ import {
workspaceSettingsMaskRoute,
workspaceSettingsRoute,
} from '@colanode/ui/routes/workspace/settings';
import {
workspaceStorageMaskRoute,
workspaceStorageRoute,
} from '@colanode/ui/routes/workspace/storage';
import {
workspaceUploadsMaskRoute,
workspaceUploadsRoute,
@@ -64,7 +60,6 @@ export const routeTree = rootRoute.addChildren([
nodeRoute.addChildren([modalNodeRoute]),
workspaceDownloadsRoute,
workspaceUploadsRoute,
workspaceStorageRoute,
workspaceUsersRoute,
workspaceSettingsRoute,
accountSettingsRoute,
@@ -77,7 +72,6 @@ export const routeTree = rootRoute.addChildren([
nodeMaskRoute,
workspaceSettingsMaskRoute,
workspaceUsersMaskRoute,
workspaceStorageMaskRoute,
workspaceUploadsMaskRoute,
workspaceDownloadsMaskRoute,
accountSettingsMaskRoute,

View File

@@ -63,18 +63,6 @@ export const workspaceUsersRouteMask = createRouteMask({
},
});
export const workspaceStorageRouteMask = createRouteMask({
routeTree: routeTree,
from: '/workspace/$userId/storage',
to: '/$workspaceId/storage',
params: (ctx) => {
const workspace = collections.workspaces.get(ctx.userId);
return {
workspaceId: workspace?.workspaceId ?? 'unknown',
};
},
});
export const workspaceUploadsRouteMask = createRouteMask({
routeTree: routeTree,
from: '/workspace/$userId/uploads',
@@ -155,7 +143,6 @@ export const routeMasks = [
modalNodeRouteMask,
workspaceSettingsRouteMask,
workspaceUsersRouteMask,
workspaceStorageRouteMask,
workspaceUploadsRouteMask,
workspaceDownloadsRouteMask,
accountSettingsRouteMask,

View File

@@ -1,36 +0,0 @@
import { createRoute, redirect } from '@tanstack/react-router';
import { WorkspaceStorageContainer } from '@colanode/ui/components/workspaces/storage/workspace-storage-container';
import { WorkspaceStorageTab } from '@colanode/ui/components/workspaces/storage/workspace-storage-tab';
import { getWorkspaceUserId } from '@colanode/ui/routes/utils';
import {
workspaceRoute,
workspaceMaskRoute,
} from '@colanode/ui/routes/workspace';
export const workspaceStorageRoute = createRoute({
getParentRoute: () => workspaceRoute,
path: '/storage',
component: WorkspaceStorageContainer,
context: () => {
return {
tab: <WorkspaceStorageTab />,
};
},
});
export const workspaceStorageMaskRoute = createRoute({
getParentRoute: () => workspaceMaskRoute,
path: '/storage',
component: () => null,
beforeLoad: (ctx) => {
const userId = getWorkspaceUserId(ctx.params.workspaceId);
if (userId) {
throw redirect({
to: '/workspace/$userId/storage',
params: { userId },
replace: true,
});
}
},
});