Merge branch 'preview' of https://github.com/makeplane/plane into refactor/file-plugins

This commit is contained in:
Aaryan Khandelwal
2025-05-27 20:30:12 +05:30
243 changed files with 2706 additions and 812 deletions

2
.gitignore vendored
View File

@@ -53,6 +53,8 @@ mediafiles
.env
.DS_Store
logs/
htmlcov/
.coverage
node_modules/
assets/dist/

View File

@@ -26,16 +26,16 @@ export const InstanceAIForm: FC<IInstanceAIForm> = (props) => {
formState: { errors, isSubmitting },
} = useForm<AIFormValues>({
defaultValues: {
OPENAI_API_KEY: config["OPENAI_API_KEY"],
GPT_ENGINE: config["GPT_ENGINE"],
LLM_API_KEY: config["LLM_API_KEY"],
LLM_MODEL: config["LLM_MODEL"],
},
});
const aiFormFields: TControllerInputFormField[] = [
{
key: "GPT_ENGINE",
key: "LLM_MODEL",
type: "text",
label: "GPT_ENGINE",
label: "LLM Model",
description: (
<>
Choose an OpenAI engine.{" "}
@@ -49,12 +49,12 @@ export const InstanceAIForm: FC<IInstanceAIForm> = (props) => {
</a>
</>
),
placeholder: "gpt-3.5-turbo",
error: Boolean(errors.GPT_ENGINE),
placeholder: "gpt-4o-mini",
error: Boolean(errors.LLM_MODEL),
required: false,
},
{
key: "OPENAI_API_KEY",
key: "LLM_API_KEY",
type: "password",
label: "API key",
description: (
@@ -71,7 +71,7 @@ export const InstanceAIForm: FC<IInstanceAIForm> = (props) => {
</>
),
placeholder: "sk-asddassdfasdefqsdfasd23das3dasdcasd",
error: Boolean(errors.OPENAI_API_KEY),
error: Boolean(errors.LLM_API_KEY),
required: false,
},
];

25
apiserver/.coveragerc Normal file
View File

@@ -0,0 +1,25 @@
# Coverage.py configuration for the Plane API server.
[run]
# Measure the `plane` package only.
source = plane
# Paths that hold no meaningful application logic are excluded from measurement.
omit =
    */tests/*
    */migrations/*
    */settings/*
    */wsgi.py
    */asgi.py
    */urls.py
    manage.py
    */admin.py
    */apps.py

[report]
# Lines matching any of these patterns are excluded from the coverage report.
exclude_lines =
    pragma: no cover
    def __repr__
    if self.debug:
    raise NotImplementedError
    if __name__ == .__main__.
    pass
    raise ImportError

[html]
# Output directory for the HTML coverage report.
directory = htmlcov

View File

@@ -11,6 +11,9 @@ from plane.app.views import (
AdvanceAnalyticsChartEndpoint,
DefaultAnalyticsEndpoint,
ProjectStatsEndpoint,
ProjectAdvanceAnalyticsEndpoint,
ProjectAdvanceAnalyticsStatsEndpoint,
ProjectAdvanceAnalyticsChartEndpoint,
)
@@ -67,4 +70,19 @@ urlpatterns = [
AdvanceAnalyticsChartEndpoint.as_view(),
name="advance-analytics-chart",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/advance-analytics/",
ProjectAdvanceAnalyticsEndpoint.as_view(),
name="project-advance-analytics",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/advance-analytics-stats/",
ProjectAdvanceAnalyticsStatsEndpoint.as_view(),
name="project-advance-analytics-stats",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/advance-analytics-charts/",
ProjectAdvanceAnalyticsChartEndpoint.as_view(),
name="project-advance-analytics-chart",
),
]

View File

@@ -205,6 +205,12 @@ from .analytic.advance import (
AdvanceAnalyticsChartEndpoint,
)
from .analytic.project_analytics import (
ProjectAdvanceAnalyticsEndpoint,
ProjectAdvanceAnalyticsStatsEndpoint,
ProjectAdvanceAnalyticsChartEndpoint,
)
from .notification.base import (
NotificationViewSet,
UnreadNotificationEndpoint,

View File

@@ -5,7 +5,6 @@ from django.db.models import QuerySet, Q, Count
from django.http import HttpRequest
from django.db.models.functions import TruncMonth
from django.utils import timezone
from datetime import timedelta
from plane.app.views.base import BaseAPIView
from plane.app.permissions import ROLE, allow_permission
from plane.db.models import (
@@ -19,10 +18,8 @@ from plane.db.models import (
Workspace,
CycleIssue,
ModuleIssue,
ProjectMember,
)
from django.db import models
from django.db.models import F, Case, When, Value
from django.db.models.functions import Concat
from plane.utils.build_chart import build_analytics_chart
from plane.utils.date_utils import (
get_analytics_filters,
@@ -75,32 +72,27 @@ class AdvanceAnalyticsEndpoint(AdvanceAnalyticsBaseView):
}
def get_overview_data(self) -> Dict[str, Dict[str, int]]:
members_query = WorkspaceMember.objects.filter(
workspace__slug=self._workspace_slug, is_active=True
)
if self.request.GET.get("project_ids", None):
project_ids = self.request.GET.get("project_ids", None)
project_ids = [str(project_id) for project_id in project_ids.split(",")]
members_query = ProjectMember.objects.filter(
project_id__in=project_ids, is_active=True
)
return {
"total_users": self.get_filtered_counts(
WorkspaceMember.objects.filter(
workspace__slug=self._workspace_slug, is_active=True
)
),
"total_users": self.get_filtered_counts(members_query),
"total_admins": self.get_filtered_counts(
WorkspaceMember.objects.filter(
workspace__slug=self._workspace_slug,
role=ROLE.ADMIN.value,
is_active=True,
)
members_query.filter(role=ROLE.ADMIN.value)
),
"total_members": self.get_filtered_counts(
WorkspaceMember.objects.filter(
workspace__slug=self._workspace_slug,
role=ROLE.MEMBER.value,
is_active=True,
)
members_query.filter(role=ROLE.MEMBER.value)
),
"total_guests": self.get_filtered_counts(
WorkspaceMember.objects.filter(
workspace__slug=self._workspace_slug,
role=ROLE.GUEST.value,
is_active=True,
)
members_query.filter(role=ROLE.GUEST.value)
),
"total_projects": self.get_filtered_counts(
Project.objects.filter(**self.filters["project_filters"])
@@ -113,30 +105,13 @@ class AdvanceAnalyticsEndpoint(AdvanceAnalyticsBaseView):
),
"total_intake": self.get_filtered_counts(
Issue.objects.filter(**self.filters["base_filters"]).filter(
issue_intake__isnull=False
issue_intake__status__in=["-2", "0"]
)
),
}
def get_work_items_stats(
self, cycle_id=None, module_id=None
) -> Dict[str, Dict[str, int]]:
"""
Returns work item stats for the workspace, or filtered by cycle_id or module_id if provided.
"""
base_queryset = None
if cycle_id is not None:
cycle_issues = CycleIssue.objects.filter(
**self.filters["base_filters"], cycle_id=cycle_id
).values_list("issue_id", flat=True)
base_queryset = Issue.issue_objects.filter(id__in=cycle_issues)
elif module_id is not None:
module_issues = ModuleIssue.objects.filter(
**self.filters["base_filters"], module_id=module_id
).values_list("issue_id", flat=True)
base_queryset = Issue.issue_objects.filter(id__in=module_issues)
else:
base_queryset = Issue.issue_objects.filter(**self.filters["base_filters"])
def get_work_items_stats(self) -> Dict[str, Dict[str, int]]:
base_queryset = Issue.issue_objects.filter(**self.filters["base_filters"])
return {
"total_work_items": self.get_filtered_counts(base_queryset),
@@ -165,11 +140,8 @@ class AdvanceAnalyticsEndpoint(AdvanceAnalyticsBaseView):
status=status.HTTP_200_OK,
)
elif tab == "work-items":
# Optionally accept cycle_id or module_id as query params
cycle_id = request.GET.get("cycle_id", None)
module_id = request.GET.get("module_id", None)
return Response(
self.get_work_items_stats(cycle_id=cycle_id, module_id=module_id),
self.get_work_items_stats(),
status=status.HTTP_200_OK,
)
return Response({"message": "Invalid tab"}, status=status.HTTP_400_BAD_REQUEST)
@@ -188,7 +160,21 @@ class AdvanceAnalyticsStatsEndpoint(AdvanceAnalyticsBaseView):
)
return (
base_queryset.values("project_id", "project__name")
base_queryset.values("project_id", "project__name").annotate(
cancelled_work_items=Count("id", filter=Q(state__group="cancelled")),
completed_work_items=Count("id", filter=Q(state__group="completed")),
backlog_work_items=Count("id", filter=Q(state__group="backlog")),
un_started_work_items=Count("id", filter=Q(state__group="unstarted")),
started_work_items=Count("id", filter=Q(state__group="started")),
)
.order_by("project_id")
)
def get_work_items_stats(self) -> Dict[str, Dict[str, int]]:
base_queryset = Issue.issue_objects.filter(**self.filters["base_filters"])
return (
base_queryset
.values("project_id", "project__name")
.annotate(
cancelled_work_items=Count("id", filter=Q(state__group="cancelled")),
completed_work_items=Count("id", filter=Q(state__group="completed")),
@@ -199,100 +185,14 @@ class AdvanceAnalyticsStatsEndpoint(AdvanceAnalyticsBaseView):
.order_by("project_id")
)
def get_work_items_stats(
self, cycle_id=None, module_id=None, peek_view=False
) -> Dict[str, Dict[str, int]]:
base_queryset = None
if cycle_id is not None:
cycle_issues = CycleIssue.objects.filter(
**self.filters["base_filters"], cycle_id=cycle_id
).values_list("issue_id", flat=True)
base_queryset = Issue.issue_objects.filter(id__in=cycle_issues)
elif module_id is not None:
module_issues = ModuleIssue.objects.filter(
**self.filters["base_filters"], module_id=module_id
).values_list("issue_id", flat=True)
base_queryset = Issue.issue_objects.filter(id__in=module_issues)
elif peek_view:
base_queryset = Issue.issue_objects.filter(**self.filters["base_filters"])
else:
base_queryset = Issue.issue_objects.filter(**self.filters["base_filters"])
return (
base_queryset.values("project_id", "project__name")
.annotate(
cancelled_work_items=Count(
"id", filter=Q(state__group="cancelled")
),
completed_work_items=Count(
"id", filter=Q(state__group="completed")
),
backlog_work_items=Count("id", filter=Q(state__group="backlog")),
un_started_work_items=Count(
"id", filter=Q(state__group="unstarted")
),
started_work_items=Count("id", filter=Q(state__group="started")),
)
.order_by("project_id")
)
return (
base_queryset.annotate(display_name=F("assignees__display_name"))
.annotate(assignee_id=F("assignees__id"))
.annotate(avatar=F("assignees__avatar"))
.annotate(
avatar_url=Case(
# If `avatar_asset` exists, use it to generate the asset URL
When(
assignees__avatar_asset__isnull=False,
then=Concat(
Value("/api/assets/v2/static/"),
"assignees__avatar_asset", # Assuming avatar_asset has an id or relevant field
Value("/"),
),
),
# If `avatar_asset` is None, fall back to using `avatar` field directly
When(
assignees__avatar_asset__isnull=True, then="assignees__avatar"
),
default=Value(None),
output_field=models.CharField(),
)
)
.values("display_name", "assignee_id", "avatar_url")
.annotate(
cancelled_work_items=Count(
"id", filter=Q(state__group="cancelled"), distinct=True
),
completed_work_items=Count(
"id", filter=Q(state__group="completed"), distinct=True
),
backlog_work_items=Count(
"id", filter=Q(state__group="backlog"), distinct=True
),
un_started_work_items=Count(
"id", filter=Q(state__group="unstarted"), distinct=True
),
started_work_items=Count(
"id", filter=Q(state__group="started"), distinct=True
),
)
.order_by("display_name")
)
@allow_permission([ROLE.ADMIN, ROLE.MEMBER], level="WORKSPACE")
def get(self, request: HttpRequest, slug: str) -> Response:
self.initialize_workspace(slug, type="chart")
type = request.GET.get("type", "work-items")
if type == "work-items":
# Optionally accept cycle_id or module_id as query params
cycle_id = request.GET.get("cycle_id", None)
module_id = request.GET.get("module_id", None)
peek_view = request.GET.get("peek_view", False)
return Response(
self.get_work_items_stats(
cycle_id=cycle_id, module_id=module_id, peek_view=peek_view
),
self.get_work_items_stats(),
status=status.HTTP_200_OK,
)
@@ -352,9 +252,7 @@ class AdvanceAnalyticsChartEndpoint(AdvanceAnalyticsBaseView):
for key, value in data.items()
]
def work_item_completion_chart(
self, cycle_id=None, module_id=None, peek_view=False
) -> Dict[str, Any]:
def work_item_completion_chart(self) -> Dict[str, Any]:
# Get the base queryset
queryset = (
Issue.issue_objects.filter(**self.filters["base_filters"])
@@ -364,143 +262,62 @@ class AdvanceAnalyticsChartEndpoint(AdvanceAnalyticsBaseView):
)
)
if cycle_id is not None and peek_view:
cycle_issues = CycleIssue.objects.filter(
**self.filters["base_filters"], cycle_id=cycle_id
).values_list("issue_id", flat=True)
cycle = Cycle.objects.filter(id=cycle_id).first()
if cycle and cycle.start_date:
start_date = cycle.start_date.date()
end_date = cycle.end_date.date()
else:
return {"data": [], "schema": {}}
queryset = cycle_issues
elif module_id is not None and peek_view:
module_issues = ModuleIssue.objects.filter(
**self.filters["base_filters"], module_id=module_id
).values_list("issue_id", flat=True)
module = Module.objects.filter(id=module_id).first()
if module and module.start_date:
start_date = module.start_date
end_date = module.target_date
else:
return {"data": [], "schema": {}}
queryset = module_issues
elif peek_view:
project_ids_str = self.request.GET.get("project_ids")
if project_ids_str:
project_id_list = [
pid.strip() for pid in project_ids_str.split(",") if pid.strip()
]
else:
project_id_list = []
return {"data": [], "schema": {}}
project_id = project_id_list[0]
project = Project.objects.filter(id=project_id).first()
if project.created_at:
start_date = project.created_at.date().replace(day=1)
else:
return {"data": [], "schema": {}}
else:
workspace = Workspace.objects.get(slug=self._workspace_slug)
start_date = workspace.created_at.date().replace(day=1)
workspace = Workspace.objects.get(slug=self._workspace_slug)
start_date = workspace.created_at.date().replace(day=1)
if cycle_id or module_id:
# Get daily stats with optimized query
daily_stats = (
queryset.values("created_at__date")
.annotate(
created_count=Count("id"),
completed_count=Count(
"id", filter=Q(issue__state__group="completed")
),
)
.order_by("created_at__date")
# Apply date range filter if available
if self.filters["chart_period_range"]:
start_date, end_date = self.filters["chart_period_range"]
queryset = queryset.filter(
created_at__date__gte=start_date, created_at__date__lte=end_date
)
# Create a dictionary of existing stats with summed counts
stats_dict = {
stat["created_at__date"].strftime("%Y-%m-%d"): {
"created_count": stat["created_count"],
"completed_count": stat["completed_count"],
}
for stat in daily_stats
}
# Generate data for all days in the range
data = []
current_date = start_date
while current_date <= end_date:
date_str = current_date.strftime("%Y-%m-%d")
stats = stats_dict.get(
date_str, {"created_count": 0, "completed_count": 0}
)
data.append(
{
"key": date_str,
"name": date_str,
"count": stats["created_count"] + stats["completed_count"],
"completed_issues": stats["completed_count"],
"created_issues": stats["created_count"],
}
)
current_date += timedelta(days=1)
else:
# Apply date range filter if available
if self.filters["chart_period_range"]:
start_date, end_date = self.filters["chart_period_range"]
queryset = queryset.filter(
created_at__date__gte=start_date, created_at__date__lte=end_date
)
# Annotate by month and count
monthly_stats = (
queryset.annotate(month=TruncMonth("created_at"))
.values("month")
.annotate(
created_count=Count("id"),
completed_count=Count("id", filter=Q(state__group="completed")),
)
.order_by("month")
# Annotate by month and count
monthly_stats = (
queryset.annotate(month=TruncMonth("created_at"))
.values("month")
.annotate(
created_count=Count("id"),
completed_count=Count("id", filter=Q(state__group="completed")),
)
.order_by("month")
)
# Create dictionary of month -> counts
stats_dict = {
stat["month"].strftime("%Y-%m-%d"): {
"created_count": stat["created_count"],
"completed_count": stat["completed_count"],
}
for stat in monthly_stats
# Create dictionary of month -> counts
stats_dict = {
stat["month"].strftime("%Y-%m-%d"): {
"created_count": stat["created_count"],
"completed_count": stat["completed_count"],
}
for stat in monthly_stats
}
# Generate monthly data (ensure months with 0 count are included)
data = []
# include the current date at the end
end_date = timezone.now().date()
last_month = end_date.replace(day=1)
current_month = start_date
# Generate monthly data (ensure months with 0 count are included)
data = []
# include the current date at the end
end_date = timezone.now().date()
last_month = end_date.replace(day=1)
current_month = start_date
while current_month <= last_month:
date_str = current_month.strftime("%Y-%m-%d")
stats = stats_dict.get(
date_str, {"created_count": 0, "completed_count": 0}
while current_month <= last_month:
date_str = current_month.strftime("%Y-%m-%d")
stats = stats_dict.get(date_str, {"created_count": 0, "completed_count": 0})
data.append(
{
"key": date_str,
"name": date_str,
"count": stats["created_count"],
"completed_issues": stats["completed_count"],
"created_issues": stats["created_count"],
}
)
# Move to next month
if current_month.month == 12:
current_month = current_month.replace(
year=current_month.year + 1, month=1
)
data.append(
{
"key": date_str,
"name": date_str,
"count": stats["created_count"],
"completed_issues": stats["completed_count"],
"created_issues": stats["created_count"],
}
)
# Move to next month
if current_month.month == 12:
current_month = current_month.replace(
year=current_month.year + 1, month=1
)
else:
current_month = current_month.replace(month=current_month.month + 1)
else:
current_month = current_month.replace(month=current_month.month + 1)
schema = {
"completed_issues": "completed_issues",
@@ -515,8 +332,6 @@ class AdvanceAnalyticsChartEndpoint(AdvanceAnalyticsBaseView):
type = request.GET.get("type", "projects")
group_by = request.GET.get("group_by", None)
x_axis = request.GET.get("x_axis", "PRIORITY")
cycle_id = request.GET.get("cycle_id", None)
module_id = request.GET.get("module_id", None)
if type == "projects":
return Response(self.project_chart(), status=status.HTTP_200_OK)
@@ -530,19 +345,6 @@ class AdvanceAnalyticsChartEndpoint(AdvanceAnalyticsBaseView):
)
)
# Apply cycle/module filters if present
if cycle_id is not None:
cycle_issues = CycleIssue.objects.filter(
**self.filters["base_filters"], cycle_id=cycle_id
).values_list("issue_id", flat=True)
queryset = queryset.filter(id__in=cycle_issues)
elif module_id is not None:
module_issues = ModuleIssue.objects.filter(
**self.filters["base_filters"], module_id=module_id
).values_list("issue_id", flat=True)
queryset = queryset.filter(id__in=module_issues)
# Apply date range filter if available
if self.filters["chart_period_range"]:
start_date, end_date = self.filters["chart_period_range"]
@@ -556,14 +358,8 @@ class AdvanceAnalyticsChartEndpoint(AdvanceAnalyticsBaseView):
)
elif type == "work-items":
# Optionally accept cycle_id or module_id as query params
cycle_id = request.GET.get("cycle_id", None)
module_id = request.GET.get("module_id", None)
peek_view = request.GET.get("peek_view", False)
return Response(
self.work_item_completion_chart(
cycle_id=cycle_id, module_id=module_id, peek_view=peek_view
),
self.work_item_completion_chart(),
status=status.HTTP_200_OK,
)

View File

@@ -0,0 +1,421 @@
from rest_framework.response import Response
from rest_framework import status
from typing import Dict, Any
from django.db.models import QuerySet, Q, Count
from django.http import HttpRequest
from django.db.models.functions import TruncMonth
from django.utils import timezone
from datetime import timedelta
from plane.app.views.base import BaseAPIView
from plane.app.permissions import ROLE, allow_permission
from plane.db.models import (
Project,
Issue,
Cycle,
Module,
CycleIssue,
ModuleIssue,
)
from django.db import models
from django.db.models import F, Case, When, Value
from django.db.models.functions import Concat
from plane.utils.build_chart import build_analytics_chart
from plane.utils.date_utils import (
get_analytics_filters,
)
class ProjectAdvanceAnalyticsBaseView(BaseAPIView):
    """Shared setup for the project-level advance-analytics endpoints."""

    def initialize_workspace(self, slug: str, type: str) -> None:
        # Remember the slug for subclasses, then resolve the common analytics
        # filters (date range, project scoping) from the request query string.
        query_params = self.request.GET
        self._workspace_slug = slug
        self.filters = get_analytics_filters(
            slug=slug,
            type=type,
            user=self.request.user,
            date_filter=query_params.get("date_filter", None),
            project_ids=query_params.get("project_ids", None),
        )
class ProjectAdvanceAnalyticsEndpoint(ProjectAdvanceAnalyticsBaseView):
    """Aggregate work-item counts for one project, optionally narrowed to a
    cycle or a module via query parameters."""

    def get_filtered_counts(self, queryset: QuerySet) -> Dict[str, int]:
        # Apply the resolved analytics date range (when present) before counting.
        date_range = self.filters["analytics_date_range"]
        if date_range:
            current = date_range["current"]
            count = queryset.filter(
                created_at__gte=current["gte"],
                created_at__lte=current["lte"],
            ).count()
        else:
            count = queryset.count()
        return {"count": count}

    def get_work_items_stats(
        self, project_id, cycle_id=None, module_id=None
    ) -> Dict[str, Dict[str, int]]:
        """Return work-item stats for the project, narrowed to the given
        cycle or module when one is provided."""
        if cycle_id is not None:
            issue_ids = CycleIssue.objects.filter(
                **self.filters["base_filters"], cycle_id=cycle_id
            ).values_list("issue_id", flat=True)
            base_queryset = Issue.issue_objects.filter(id__in=issue_ids)
        elif module_id is not None:
            issue_ids = ModuleIssue.objects.filter(
                **self.filters["base_filters"], module_id=module_id
            ).values_list("issue_id", flat=True)
            base_queryset = Issue.issue_objects.filter(id__in=issue_ids)
        else:
            base_queryset = Issue.issue_objects.filter(
                **self.filters["base_filters"], project_id=project_id
            )

        # One counter per state group, plus the unfiltered total.
        stats: Dict[str, Dict[str, int]] = {
            "total_work_items": self.get_filtered_counts(base_queryset)
        }
        for key, group in (
            ("started_work_items", "started"),
            ("backlog_work_items", "backlog"),
            ("un_started_work_items", "unstarted"),
            ("completed_work_items", "completed"),
        ):
            stats[key] = self.get_filtered_counts(
                base_queryset.filter(state__group=group)
            )
        return stats

    @allow_permission([ROLE.ADMIN, ROLE.MEMBER])
    def get(self, request: HttpRequest, slug: str, project_id: str) -> Response:
        self.initialize_workspace(slug, type="analytics")
        # cycle_id / module_id optionally narrow the stats via query params.
        payload = self.get_work_items_stats(
            project_id=project_id,
            cycle_id=request.GET.get("cycle_id", None),
            module_id=request.GET.get("module_id", None),
        )
        return Response(payload, status=status.HTTP_200_OK)
class ProjectAdvanceAnalyticsStatsEndpoint(ProjectAdvanceAnalyticsBaseView):
    """State-group breakdowns for a project's work items, grouped either by
    project or by assignee."""

    def get_project_issues_stats(self) -> QuerySet:
        """State-group counts grouped by project, honouring the chart period
        range when one was resolved."""
        issues = Issue.issue_objects.filter(**self.filters["base_filters"])
        period = self.filters["chart_period_range"]
        if period:
            start_date, end_date = period
            issues = issues.filter(
                created_at__date__gte=start_date, created_at__date__lte=end_date
            )
        return (
            issues.values("project_id", "project__name")
            .annotate(
                cancelled_work_items=Count("id", filter=Q(state__group="cancelled")),
                completed_work_items=Count("id", filter=Q(state__group="completed")),
                backlog_work_items=Count("id", filter=Q(state__group="backlog")),
                un_started_work_items=Count("id", filter=Q(state__group="unstarted")),
                started_work_items=Count("id", filter=Q(state__group="started")),
            )
            .order_by("project_id")
        )

    def get_work_items_stats(
        self, project_id, cycle_id=None, module_id=None
    ) -> Dict[str, Dict[str, int]]:
        """State-group counts per assignee, narrowed to a cycle or module
        when one is provided."""
        if cycle_id is not None:
            issue_ids = CycleIssue.objects.filter(
                **self.filters["base_filters"], cycle_id=cycle_id
            ).values_list("issue_id", flat=True)
            issues = Issue.issue_objects.filter(id__in=issue_ids)
        elif module_id is not None:
            issue_ids = ModuleIssue.objects.filter(
                **self.filters["base_filters"], module_id=module_id
            ).values_list("issue_id", flat=True)
            issues = Issue.issue_objects.filter(id__in=issue_ids)
        else:
            issues = Issue.issue_objects.filter(
                **self.filters["base_filters"], project_id=project_id
            )

        # Prefer the uploaded avatar asset URL; fall back to the raw avatar
        # field when no asset exists.
        avatar_url = Case(
            When(
                assignees__avatar_asset__isnull=False,
                then=Concat(
                    Value("/api/assets/v2/static/"),
                    "assignees__avatar_asset",  # Assuming avatar_asset has an id or relevant field
                    Value("/"),
                ),
            ),
            When(assignees__avatar_asset__isnull=True, then="assignees__avatar"),
            default=Value(None),
            output_field=models.CharField(),
        )
        # distinct=True because the assignee join can duplicate issue rows.
        state_counts = {
            "cancelled_work_items": Count(
                "id", filter=Q(state__group="cancelled"), distinct=True
            ),
            "completed_work_items": Count(
                "id", filter=Q(state__group="completed"), distinct=True
            ),
            "backlog_work_items": Count(
                "id", filter=Q(state__group="backlog"), distinct=True
            ),
            "un_started_work_items": Count(
                "id", filter=Q(state__group="unstarted"), distinct=True
            ),
            "started_work_items": Count(
                "id", filter=Q(state__group="started"), distinct=True
            ),
        }
        return (
            issues.annotate(
                display_name=F("assignees__display_name"),
                assignee_id=F("assignees__id"),
                avatar=F("assignees__avatar"),
                avatar_url=avatar_url,
            )
            .values("display_name", "assignee_id", "avatar_url")
            .annotate(**state_counts)
            .order_by("display_name")
        )

    @allow_permission([ROLE.ADMIN, ROLE.MEMBER])
    def get(self, request: HttpRequest, slug: str, project_id: str) -> Response:
        self.initialize_workspace(slug, type="chart")
        type = request.GET.get("type", "work-items")
        if type == "work-items":
            # cycle_id / module_id optionally narrow the stats via query params.
            return Response(
                self.get_work_items_stats(
                    project_id=project_id,
                    cycle_id=request.GET.get("cycle_id", None),
                    module_id=request.GET.get("module_id", None),
                ),
                status=status.HTTP_200_OK,
            )
        return Response({"message": "Invalid type"}, status=status.HTTP_400_BAD_REQUEST)
class ProjectAdvanceAnalyticsChartEndpoint(ProjectAdvanceAnalyticsBaseView):
    """Chart data for a project's work items: a created-vs-completed
    completion chart, or a custom chart built by ``build_analytics_chart``."""

    def work_item_completion_chart(
        self, project_id, cycle_id=None, module_id=None
    ) -> Dict[str, Any]:
        """Build the completion chart series.

        With a cycle or module: daily buckets over the cycle/module date
        range. Otherwise: monthly buckets from the project's creation month
        up to the current month. Returns ``{"data": [...], "schema": {...}}``;
        an empty payload when the date range cannot be determined.
        """
        # Base queryset scoped to this project.
        queryset = (
            Issue.issue_objects.filter(**self.filters["base_filters"])
            .filter(project_id=project_id)
            .select_related("workspace", "state", "parent")
            .prefetch_related(
                "assignees", "labels", "issue_module__module", "issue_cycle__cycle"
            )
        )

        if cycle_id is not None:
            cycle_issues = CycleIssue.objects.filter(
                **self.filters["base_filters"], cycle_id=cycle_id
            ).values_list("issue_id", flat=True)
            cycle = Cycle.objects.filter(id=cycle_id).first()
            # Guard every dereference: the cycle may not exist, and either
            # boundary date may be unset (previously end_date was dereferenced
            # unchecked, raising AttributeError).
            if cycle and cycle.start_date and cycle.end_date:
                start_date = cycle.start_date.date()
                end_date = cycle.end_date.date()
            else:
                return {"data": [], "schema": {}}
            queryset = cycle_issues
        elif module_id is not None:
            module_issues = ModuleIssue.objects.filter(
                **self.filters["base_filters"], module_id=module_id
            ).values_list("issue_id", flat=True)
            module = Module.objects.filter(id=module_id).first()
            # target_date is also required below as the loop's end bound; a
            # None would make `current_date <= end_date` raise TypeError.
            if module and module.start_date and module.target_date:
                start_date = module.start_date
                end_date = module.target_date
            else:
                return {"data": [], "schema": {}}
            queryset = module_issues
        else:
            project = Project.objects.filter(id=project_id).first()
            # .first() may return None for an unknown project_id; previously
            # `project.created_at` was dereferenced without this check.
            if project and project.created_at:
                start_date = project.created_at.date().replace(day=1)
            else:
                return {"data": [], "schema": {}}

        if cycle_id or module_id:
            # NOTE(review): queryset here is the CycleIssue/ModuleIssue id
            # values-list, so created_at is the link row's timestamp and the
            # state is reached via the `issue__` relation — confirm intended.
            daily_stats = (
                queryset.values("created_at__date")
                .annotate(
                    created_count=Count("id"),
                    completed_count=Count(
                        "id", filter=Q(issue__state__group="completed")
                    ),
                )
                .order_by("created_at__date")
            )
            # Existing per-day stats keyed by ISO date string.
            stats_dict = {
                stat["created_at__date"].strftime("%Y-%m-%d"): {
                    "created_count": stat["created_count"],
                    "completed_count": stat["completed_count"],
                }
                for stat in daily_stats
            }
            # Emit one point per day in the range, zero-filling gaps.
            data = []
            current_date = start_date
            while current_date <= end_date:
                date_str = current_date.strftime("%Y-%m-%d")
                stats = stats_dict.get(
                    date_str, {"created_count": 0, "completed_count": 0}
                )
                data.append(
                    {
                        "key": date_str,
                        "name": date_str,
                        "count": stats["created_count"] + stats["completed_count"],
                        "completed_issues": stats["completed_count"],
                        "created_issues": stats["created_count"],
                    }
                )
                current_date += timedelta(days=1)
        else:
            # Apply date range filter if available.
            if self.filters["chart_period_range"]:
                start_date, end_date = self.filters["chart_period_range"]
                queryset = queryset.filter(
                    created_at__date__gte=start_date, created_at__date__lte=end_date
                )
            # Bucket by creation month.
            monthly_stats = (
                queryset.annotate(month=TruncMonth("created_at"))
                .values("month")
                .annotate(
                    created_count=Count("id"),
                    completed_count=Count("id", filter=Q(state__group="completed")),
                )
                .order_by("month")
            )
            # Existing per-month stats keyed by the month's first day.
            stats_dict = {
                stat["month"].strftime("%Y-%m-%d"): {
                    "created_count": stat["created_count"],
                    "completed_count": stat["completed_count"],
                }
                for stat in monthly_stats
            }
            # Emit one point per month up to the current month, zero-filling
            # months with no activity.
            data = []
            end_date = timezone.now().date()
            last_month = end_date.replace(day=1)
            current_month = start_date
            while current_month <= last_month:
                date_str = current_month.strftime("%Y-%m-%d")
                stats = stats_dict.get(
                    date_str, {"created_count": 0, "completed_count": 0}
                )
                data.append(
                    {
                        "key": date_str,
                        "name": date_str,
                        "count": stats["created_count"],
                        "completed_issues": stats["completed_count"],
                        "created_issues": stats["created_count"],
                    }
                )
                # Move to the next month, rolling over the year at December.
                if current_month.month == 12:
                    current_month = current_month.replace(
                        year=current_month.year + 1, month=1
                    )
                else:
                    current_month = current_month.replace(
                        month=current_month.month + 1
                    )

        schema = {
            "completed_issues": "completed_issues",
            "created_issues": "created_issues",
        }
        return {"data": data, "schema": schema}

    @allow_permission([ROLE.ADMIN, ROLE.MEMBER, ROLE.GUEST])
    def get(self, request: HttpRequest, slug: str, project_id: str) -> Response:
        self.initialize_workspace(slug, type="chart")
        type = request.GET.get("type", "projects")
        group_by = request.GET.get("group_by", None)
        x_axis = request.GET.get("x_axis", "PRIORITY")
        cycle_id = request.GET.get("cycle_id", None)
        module_id = request.GET.get("module_id", None)

        if type == "custom-work-items":
            queryset = (
                Issue.issue_objects.filter(**self.filters["base_filters"])
                .filter(project_id=project_id)
                .select_related("workspace", "state", "parent")
                .prefetch_related(
                    "assignees", "labels", "issue_module__module", "issue_cycle__cycle"
                )
            )
            # Apply cycle/module filters if present.
            if cycle_id is not None:
                cycle_issues = CycleIssue.objects.filter(
                    **self.filters["base_filters"], cycle_id=cycle_id
                ).values_list("issue_id", flat=True)
                queryset = queryset.filter(id__in=cycle_issues)
            elif module_id is not None:
                module_issues = ModuleIssue.objects.filter(
                    **self.filters["base_filters"], module_id=module_id
                ).values_list("issue_id", flat=True)
                queryset = queryset.filter(id__in=module_issues)
            # Apply date range filter if available.
            if self.filters["chart_period_range"]:
                start_date, end_date = self.filters["chart_period_range"]
                queryset = queryset.filter(
                    created_at__date__gte=start_date, created_at__date__lte=end_date
                )
            return Response(
                build_analytics_chart(queryset, x_axis, group_by),
                status=status.HTTP_200_OK,
            )
        elif type == "work-items":
            return Response(
                self.work_item_completion_chart(
                    project_id=project_id, cycle_id=cycle_id, module_id=module_id
                ),
                status=status.HTTP_200_OK,
            )
        return Response({"message": "Invalid type"}, status=status.HTTP_400_BAD_REQUEST)

View File

@@ -445,7 +445,7 @@ class ProjectViewSet(BaseViewSet):
is_active=True,
).exists()
):
project = Project.objects.get(pk=pk)
project = Project.objects.get(pk=pk, workspace__slug=slug)
project.delete()
webhook_activity.delay(
event="project",

View File

@@ -42,11 +42,11 @@ urlpatterns = [
# credentials
path("sign-in/", SignInAuthEndpoint.as_view(), name="sign-in"),
path("sign-up/", SignUpAuthEndpoint.as_view(), name="sign-up"),
path("spaces/sign-in/", SignInAuthSpaceEndpoint.as_view(), name="sign-in"),
path("spaces/sign-up/", SignUpAuthSpaceEndpoint.as_view(), name="sign-in"),
path("spaces/sign-in/", SignInAuthSpaceEndpoint.as_view(), name="space-sign-in"),
path("spaces/sign-up/", SignUpAuthSpaceEndpoint.as_view(), name="space-sign-up"),
# signout
path("sign-out/", SignOutAuthEndpoint.as_view(), name="sign-out"),
path("spaces/sign-out/", SignOutAuthSpaceEndpoint.as_view(), name="sign-out"),
path("spaces/sign-out/", SignOutAuthSpaceEndpoint.as_view(), name="space-sign-out"),
# csrf token
path("get-csrf-token/", CSRFTokenEndpoint.as_view(), name="get_csrf_token"),
# Magic sign in
@@ -56,17 +56,17 @@ urlpatterns = [
path(
"spaces/magic-generate/",
MagicGenerateSpaceEndpoint.as_view(),
name="magic-generate",
name="space-magic-generate",
),
path(
"spaces/magic-sign-in/",
MagicSignInSpaceEndpoint.as_view(),
name="magic-sign-in",
name="space-magic-sign-in",
),
path(
"spaces/magic-sign-up/",
MagicSignUpSpaceEndpoint.as_view(),
name="magic-sign-up",
name="space-magic-sign-up",
),
## Google Oauth
path("google/", GoogleOauthInitiateEndpoint.as_view(), name="google-initiate"),
@@ -74,12 +74,12 @@ urlpatterns = [
path(
"spaces/google/",
GoogleOauthInitiateSpaceEndpoint.as_view(),
name="google-initiate",
name="space-google-initiate",
),
path(
"google/callback/",
"spaces/google/callback/",
GoogleCallbackSpaceEndpoint.as_view(),
name="google-callback",
name="space-google-callback",
),
## Github Oauth
path("github/", GitHubOauthInitiateEndpoint.as_view(), name="github-initiate"),
@@ -87,12 +87,12 @@ urlpatterns = [
path(
"spaces/github/",
GitHubOauthInitiateSpaceEndpoint.as_view(),
name="github-initiate",
name="space-github-initiate",
),
path(
"spaces/github/callback/",
GitHubCallbackSpaceEndpoint.as_view(),
name="github-callback",
name="space-github-callback",
),
## Gitlab Oauth
path("gitlab/", GitLabOauthInitiateEndpoint.as_view(), name="gitlab-initiate"),
@@ -100,12 +100,12 @@ urlpatterns = [
path(
"spaces/gitlab/",
GitLabOauthInitiateSpaceEndpoint.as_view(),
name="gitlab-initiate",
name="space-gitlab-initiate",
),
path(
"spaces/gitlab/callback/",
GitLabCallbackSpaceEndpoint.as_view(),
name="gitlab-callback",
name="space-gitlab-callback",
),
# Email Check
path("email-check/", EmailCheckEndpoint.as_view(), name="email-check"),
@@ -120,12 +120,12 @@ urlpatterns = [
path(
"spaces/forgot-password/",
ForgotPasswordSpaceEndpoint.as_view(),
name="forgot-password",
name="space-forgot-password",
),
path(
"spaces/reset-password/<uidb64>/<token>/",
ResetPasswordSpaceEndpoint.as_view(),
name="forgot-password",
name="space-forgot-password",
),
path("change-password/", ChangePasswordEndpoint.as_view(), name="forgot-password"),
path("set-password/", SetUserPasswordEndpoint.as_view(), name="set-password"),

View File

@@ -57,7 +57,7 @@ class InstanceEndpoint(BaseAPIView):
POSTHOG_API_KEY,
POSTHOG_HOST,
UNSPLASH_ACCESS_KEY,
OPENAI_API_KEY,
LLM_API_KEY,
IS_INTERCOM_ENABLED,
INTERCOM_APP_ID,
) = get_configuration_value(
@@ -112,8 +112,8 @@ class InstanceEndpoint(BaseAPIView):
"default": os.environ.get("UNSPLASH_ACCESS_KEY", ""),
},
{
"key": "OPENAI_API_KEY",
"default": os.environ.get("OPENAI_API_KEY", ""),
"key": "LLM_API_KEY",
"default": os.environ.get("LLM_API_KEY", ""),
},
# Intercom settings
{
@@ -151,7 +151,7 @@ class InstanceEndpoint(BaseAPIView):
data["has_unsplash_configured"] = bool(UNSPLASH_ACCESS_KEY)
# LLM settings (configuration key renamed from OPENAI_API_KEY to LLM_API_KEY)
data["has_openai_configured"] = bool(OPENAI_API_KEY)
data["has_llm_configured"] = bool(LLM_API_KEY)
# File size settings
data["file_size_limit"] = float(os.environ.get("FILE_SIZE_LIMIT", 5242880))

View File

@@ -83,6 +83,32 @@ class APITokenLogMiddleware:
self.process_request(request, response, request_body)
return response
def _safe_decode_body(self, content):
"""
Safely decodes request/response body content, handling binary data.
Returns None if content is None, or a string representation of the content.
"""
# If the content is None, return None
if content is None:
return None
# If the content is an empty bytes object, return None
if content == b"":
return None
# Check if content is binary by looking for common binary file signatures
if (
content.startswith(b"\x89PNG")
or content.startswith(b"\xff\xd8\xff")
or content.startswith(b"%PDF")
):
return "[Binary Content]"
try:
return content.decode("utf-8")
except UnicodeDecodeError:
return "[Could not decode content]"
def process_request(self, request, response, request_body):
api_key_header = "X-Api-Key"
api_key = request.headers.get(api_key_header)
@@ -95,9 +121,13 @@ class APITokenLogMiddleware:
method=request.method,
query_params=request.META.get("QUERY_STRING", ""),
headers=str(request.headers),
body=(request_body.decode("utf-8") if request_body else None),
body=(
self._safe_decode_body(request_body) if request_body else None
),
response_body=(
response.content.decode("utf-8") if response.content else None
self._safe_decode_body(response.content)
if response.content
else None
),
response_code=response.status_code,
ip_address=get_client_ip(request=request),

View File

@@ -179,7 +179,7 @@ class ProjectIssuesPublicEndpoint(BaseAPIView):
Q(issue_intake__status=1)
| Q(issue_intake__status=-1)
| Q(issue_intake__status=2)
| Q(issue_intake__status=True),
| Q(issue_intake__isnull=True),
archived_at__isnull=True,
is_draft=False,
),
@@ -205,7 +205,7 @@ class ProjectIssuesPublicEndpoint(BaseAPIView):
Q(issue_intake__status=1)
| Q(issue_intake__status=-1)
| Q(issue_intake__status=2)
| Q(issue_intake__status=True),
| Q(issue_intake__isnull=True),
archived_at__isnull=True,
is_draft=False,
),

View File

@@ -0,0 +1,143 @@
# Plane Tests
This directory contains tests for the Plane application. The tests are organized using pytest.
## Test Structure
Tests are organized into the following categories:
- **Unit tests**: Test individual functions or classes in isolation.
- **Contract tests**: Test interactions between components and verify API contracts are fulfilled.
- **API tests**: Test the external API endpoints (under `/api/v1/`).
- **App tests**: Test the web application API endpoints (under `/api/`).
- **Smoke tests**: Basic tests to verify that the application runs correctly.
## API vs App Endpoints
Plane has two types of API endpoints:
1. **External API** (`plane.api`):
- Available at `/api/v1/` endpoint
- Uses API key authentication (X-Api-Key header)
- Designed for external API contracts and third-party access
- Tests use the `api_key_client` fixture for authentication
- Test files are in `contract/api/`
2. **Web App API** (`plane.app`):
- Available at `/api/` endpoint
- Uses session-based authentication (CSRF disabled)
- Designed for the web application frontend
- Tests use the `session_client` fixture for authentication
- Test files are in `contract/app/`
## Running Tests
To run all tests:
```bash
python -m pytest
```
To run specific test categories:
```bash
# Run unit tests
python -m pytest plane/tests/unit/
# Run API contract tests
python -m pytest plane/tests/contract/api/
# Run App contract tests
python -m pytest plane/tests/contract/app/
# Run smoke tests
python -m pytest plane/tests/smoke/
```
For convenience, we also provide a helper script:
```bash
# Run all tests
./run_tests.py
# Run only unit tests
./run_tests.py -u
# Run contract tests with coverage report
./run_tests.py -c -o
# Run tests in parallel
./run_tests.py -p
```
## Fixtures
The following fixtures are available for testing:
- `api_client`: Unauthenticated API client
- `create_user`: Creates a test user
- `api_token`: API token for the test user
- `api_key_client`: API client with API key authentication (for external API tests)
- `session_client`: API client with session authentication (for app API tests)
- `plane_server`: Live Django test server for HTTP-based smoke tests
## Writing Tests
When writing tests, follow these guidelines:
1. Place tests in the appropriate directory based on their type.
2. Use the correct client fixture based on the API being tested:
- For external API (`/api/v1/`), use `api_key_client`
- For web app API (`/api/`), use `session_client`
- For smoke tests with real HTTP, use `plane_server`
3. Use the correct URL namespace when reverse-resolving URLs:
- For external API, use `reverse("api:endpoint_name")`
- For web app API, use `reverse("endpoint_name")`
4. Add the `@pytest.mark.django_db` decorator to tests that interact with the database.
5. Add the appropriate markers (`@pytest.mark.contract`, etc.) to categorize tests.
## Test Fixtures
Common fixtures are defined in:
- `conftest.py`: General fixtures for authentication, database access, etc.
- `conftest_external.py`: Fixtures for external services (Redis, Elasticsearch, Celery, MongoDB)
- `factories.py`: Test factories for easy model instance creation
## Best Practices
When writing tests, follow these guidelines:
1. **Use pytest's assert syntax** instead of Django's `self.assert*` methods.
2. **Add markers to categorize tests**:
```python
@pytest.mark.unit
@pytest.mark.contract
@pytest.mark.smoke
```
3. **Use fixtures instead of setUp/tearDown methods** for cleaner, more reusable test code.
4. **Mock external dependencies** with the provided fixtures to avoid external service dependencies.
5. **Write focused tests** that verify one specific behavior or edge case.
6. **Keep test files small and organized** by logical components or endpoints.
7. **Target 90% code coverage** for models, serializers, and business logic.
## External Dependencies
Tests for components that interact with external services should:
1. Use the `mock_redis`, `mock_elasticsearch`, `mock_mongodb`, and `mock_celery` fixtures for unit and most contract tests.
2. For more comprehensive contract tests, use Docker-based test containers (optional).
## Coverage Reports
Generate a coverage report with:
```bash
python -m pytest --cov=plane --cov-report=term --cov-report=html
```
This creates an HTML report in the `htmlcov/` directory.
## Migration from Old Tests
Some tests are still in the old format in the `api/` directory. These need to be migrated to the new contract test structure in the appropriate directories.

View File

@@ -0,0 +1,151 @@
# Testing Guide for Plane
This guide explains how to write tests for Plane using our pytest-based testing strategy.
## Test Categories
We divide tests into three categories:
1. **Unit Tests**: Testing individual components in isolation.
2. **Contract Tests**: Testing API endpoints and verifying contracts between components.
3. **Smoke Tests**: Basic end-to-end tests for critical flows.
## Writing Unit Tests
Unit tests should be placed in the appropriate directory under `tests/unit/` depending on what you're testing:
- `tests/unit/models/` - For model tests
- `tests/unit/serializers/` - For serializer tests
- `tests/unit/utils/` - For utility function tests
### Example Unit Test:
```python
import pytest
from plane.api.serializers import MySerializer
@pytest.mark.unit
class TestMySerializer:
def test_serializer_valid_data(self):
# Create input data
data = {"field1": "value1", "field2": 42}
# Initialize the serializer
serializer = MySerializer(data=data)
# Validate
assert serializer.is_valid()
# Check validated data
assert serializer.validated_data["field1"] == "value1"
assert serializer.validated_data["field2"] == 42
```
## Writing Contract Tests
Contract tests should be placed in `tests/contract/api/` or `tests/contract/app/` directories and should test the API endpoints.
### Example Contract Test:
```python
import pytest
from django.urls import reverse
from rest_framework import status
@pytest.mark.contract
class TestMyEndpoint:
@pytest.mark.django_db
def test_my_endpoint_get(self, auth_client):
# Get the URL
url = reverse("my-endpoint")
# Make request
response = auth_client.get(url)
# Check response
assert response.status_code == status.HTTP_200_OK
assert "data" in response.data
```
## Writing Smoke Tests
Smoke tests should be placed in `tests/smoke/` directory and use the `plane_server` fixture to test against a real HTTP server.
### Example Smoke Test:
```python
import pytest
import requests
@pytest.mark.smoke
class TestCriticalFlow:
@pytest.mark.django_db
def test_login_flow(self, plane_server, create_user, user_data):
# Get login URL
url = f"{plane_server.url}/api/auth/signin/"
# Test login
response = requests.post(
url,
json={
"email": user_data["email"],
"password": user_data["password"]
}
)
# Verify
assert response.status_code == 200
data = response.json()
assert "access_token" in data
```
## Useful Fixtures
Our test setup provides several useful fixtures:
1. `api_client`: An unauthenticated DRF APIClient
2. `api_key_client`: API client with API key authentication (for external API tests)
3. `session_client`: API client with session authentication (for web app API tests)
4. `create_user`: Creates and returns a test user
5. `mock_redis`: Mocks Redis interactions
6. `mock_elasticsearch`: Mocks Elasticsearch interactions
7. `mock_celery`: Mocks Celery task execution
## Using Factory Boy
For more complex test data setup, use the provided factories:
```python
from plane.tests.factories import UserFactory, WorkspaceFactory
# Create a user
user = UserFactory()
# Create a workspace with a specific owner
workspace = WorkspaceFactory(owner=user)
# Create multiple objects
users = UserFactory.create_batch(5)
```
## Running Tests
Use pytest to run tests:
```bash
# Run all tests
python -m pytest
# Run only unit tests with coverage
python -m pytest -m unit --cov=plane
```
## Best Practices
1. **Keep tests small and focused** - Each test should verify one specific behavior.
2. **Use markers** - Always add appropriate markers (`@pytest.mark.unit`, etc.).
3. **Mock external dependencies** - Use the provided mock fixtures.
4. **Use factories** - For complex data setup, use factories.
5. **Don't test the framework** - Focus on testing your business logic, not Django/DRF itself.
6. **Write readable assertions** - Use plain `assert` statements with clear messaging.
7. **Focus on coverage** - Aim for ≥90% code coverage for critical components.

View File

@@ -1 +1 @@
from .api import *
# Test package initialization

View File

@@ -1,34 +0,0 @@
# Third party imports
from rest_framework.test import APITestCase, APIClient
# Module imports
from plane.db.models import User
from plane.app.views.authentication import get_tokens_for_user
class BaseAPITest(APITestCase):
def setUp(self):
self.client = APIClient(HTTP_USER_AGENT="plane/test", REMOTE_ADDR="10.10.10.10")
class AuthenticatedAPITest(BaseAPITest):
def setUp(self):
super().setUp()
## Create Dummy User
self.email = "user@plane.so"
user = User.objects.create(email=self.email)
user.set_password("user@123")
user.save()
# Set user
self.user = user
# Set Up User ID
self.user_id = user.id
access_token, _ = get_tokens_for_user(user)
self.access_token = access_token
# Set Up Authentication Token
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token)

View File

@@ -1 +0,0 @@
# TODO: Tests for File Asset Uploads

View File

@@ -1 +0,0 @@
# TODO: Tests for ChangePassword and other Endpoints

View File

@@ -1,183 +0,0 @@
# Python import
import json
# Django imports
from django.urls import reverse
# Third Party imports
from rest_framework import status
from .base import BaseAPITest
# Module imports
from plane.db.models import User
from plane.settings.redis import redis_instance
class SignInEndpointTests(BaseAPITest):
def setUp(self):
super().setUp()
user = User.objects.create(email="user@plane.so")
user.set_password("user@123")
user.save()
def test_without_data(self):
url = reverse("sign-in")
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_validity(self):
url = reverse("sign-in")
response = self.client.post(
url, {"email": "useremail.com", "password": "user@123"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data, {"error": "Please provide a valid email address."}
)
def test_password_validity(self):
url = reverse("sign-in")
response = self.client.post(
url, {"email": "user@plane.so", "password": "user123"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.data,
{
"error": "Sorry, we could not find a user with the provided credentials. Please try again."
},
)
def test_user_exists(self):
url = reverse("sign-in")
response = self.client.post(
url, {"email": "user@email.so", "password": "user123"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.data,
{
"error": "Sorry, we could not find a user with the provided credentials. Please try again."
},
)
def test_user_login(self):
url = reverse("sign-in")
response = self.client.post(
url, {"email": "user@plane.so", "password": "user@123"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get("user").get("email"), "user@plane.so")
class MagicLinkGenerateEndpointTests(BaseAPITest):
def setUp(self):
super().setUp()
user = User.objects.create(email="user@plane.so")
user.set_password("user@123")
user.save()
def test_without_data(self):
url = reverse("magic-generate")
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_validity(self):
url = reverse("magic-generate")
response = self.client.post(url, {"email": "useremail.com"}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data, {"error": "Please provide a valid email address."}
)
def test_magic_generate(self):
url = reverse("magic-generate")
ri = redis_instance()
ri.delete("magic_user@plane.so")
response = self.client.post(url, {"email": "user@plane.so"}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_max_generate_attempt(self):
url = reverse("magic-generate")
ri = redis_instance()
ri.delete("magic_user@plane.so")
for _ in range(4):
response = self.client.post(url, {"email": "user@plane.so"}, format="json")
response = self.client.post(url, {"email": "user@plane.so"}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data, {"error": "Max attempts exhausted. Please try again later."}
)
class MagicSignInEndpointTests(BaseAPITest):
def setUp(self):
super().setUp()
user = User.objects.create(email="user@plane.so")
user.set_password("user@123")
user.save()
def test_without_data(self):
url = reverse("magic-sign-in")
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"error": "User token and key are required"})
def test_expired_invalid_magic_link(self):
ri = redis_instance()
ri.delete("magic_user@plane.so")
url = reverse("magic-sign-in")
response = self.client.post(
url,
{"key": "magic_user@plane.so", "token": "xxxx-xxxxx-xxxx"},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data, {"error": "The magic code/link has expired please try again"}
)
def test_invalid_magic_code(self):
ri = redis_instance()
ri.delete("magic_user@plane.so")
## Create Token
url = reverse("magic-generate")
self.client.post(url, {"email": "user@plane.so"}, format="json")
url = reverse("magic-sign-in")
response = self.client.post(
url,
{"key": "magic_user@plane.so", "token": "xxxx-xxxxx-xxxx"},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data, {"error": "Your login code was incorrect. Please try again."}
)
def test_magic_code_sign_in(self):
ri = redis_instance()
ri.delete("magic_user@plane.so")
## Create Token
url = reverse("magic-generate")
self.client.post(url, {"email": "user@plane.so"}, format="json")
# Get the token
user_data = json.loads(ri.get("magic_user@plane.so"))
token = user_data["token"]
url = reverse("magic-sign-in")
response = self.client.post(
url, {"key": "magic_user@plane.so", "token": token}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get("user").get("email"), "user@plane.so")

View File

@@ -1 +0,0 @@
# TODO: Write Test for Cycle Endpoints

View File

@@ -1 +0,0 @@
# TODO: Write Test for Issue Endpoints

View File

@@ -1 +0,0 @@
# TODO: Tests for OAuth Authentication Endpoint

View File

@@ -1 +0,0 @@
# TODO: Write Test for people Endpoint

View File

@@ -1 +0,0 @@
# TODO: Write Tests for project endpoints

View File

@@ -1 +0,0 @@
# TODO: Write Test for shortcuts

View File

@@ -1 +0,0 @@
# TODO: Write tests for state endpoints

View File

@@ -1 +0,0 @@
# TODO: Write test for view endpoints

View File

@@ -1,44 +0,0 @@
# Django imports
from django.urls import reverse
# Third party import
from rest_framework import status
# Module imports
from .base import AuthenticatedAPITest
from plane.db.models import Workspace, WorkspaceMember
class WorkSpaceCreateReadUpdateDelete(AuthenticatedAPITest):
def setUp(self):
super().setUp()
def test_create_workspace(self):
url = reverse("workspace")
# Test with empty data
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Test with valid data
response = self.client.post(
url, {"name": "Plane", "slug": "pla-ne"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Workspace.objects.count(), 1)
# Check if the member is created
self.assertEqual(WorkspaceMember.objects.count(), 1)
# Check other values
workspace = Workspace.objects.get(pk=response.data["id"])
workspace_member = WorkspaceMember.objects.get(
workspace=workspace, member_id=self.user_id
)
self.assertEqual(workspace.owner_id, self.user_id)
self.assertEqual(workspace_member.role, 20)
# Create a already existing workspace
response = self.client.post(
url, {"name": "Plane", "slug": "pla-ne"}, format="json"
)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)

View File

@@ -0,0 +1,78 @@
import pytest
from django.conf import settings
from rest_framework.test import APIClient
from pytest_django.fixtures import django_db_setup
from unittest.mock import patch, MagicMock
from plane.db.models import User
from plane.db.models.api import APIToken
@pytest.fixture(scope="session")
def django_db_setup(django_db_setup):
    """Set up the Django database for the test session."""
    # Shadows pytest-django's session-scoped ``django_db_setup`` fixture while
    # still requesting it as a parameter, so the stock database setup runs and
    # nothing extra is added. NOTE(review): body is intentionally a no-op —
    # presumably kept as a hook point for future custom DB setup; confirm it
    # is still needed.
    pass
@pytest.fixture
def api_client():
    """Provide a fresh DRF ``APIClient`` with no credentials attached."""
    client = APIClient()
    return client
@pytest.fixture
def user_data():
    """Canonical credential/profile payload shared by user-related tests."""
    return dict(
        email="test@plane.so",
        password="test-password",
        first_name="Test",
        last_name="User",
    )
@pytest.fixture
def create_user(db, user_data):
    """Persist and return a ``User`` built from the ``user_data`` payload."""
    profile_fields = {
        key: user_data[key] for key in ("email", "first_name", "last_name")
    }
    user = User.objects.create(**profile_fields)
    # Password must go through the hasher, not the constructor.
    user.set_password(user_data["password"])
    user.save()
    return user
@pytest.fixture
def api_token(db, create_user):
    """Persisted ``APIToken`` bound to the test user, for external-API auth."""
    return APIToken.objects.create(
        user=create_user,
        label="Test API Token",
        token="test-api-token-12345",
    )
@pytest.fixture
def api_key_client(api_client, api_token):
    """Client that authenticates via the ``X-Api-Key`` header (external API)."""
    key = api_token.token
    api_client.credentials(HTTP_X_API_KEY=key)
    return api_client
@pytest.fixture
def session_client(api_client, create_user):
    """Session-authenticated client, mirroring how ``plane.app`` endpoints are hit."""
    # force_authenticate skips the login flow and attaches the user directly.
    api_client.force_authenticate(user=create_user)
    return api_client
@pytest.fixture
def plane_server(live_server):
    """
    Renamed version of the pytest-django ``live_server`` fixture to avoid
    name clashes. Returns a live Django server so smoke tests can make real
    HTTP requests (e.g. ``requests.post(f"{plane_server.url}/...")``).
    """
    return live_server

View File

@@ -0,0 +1,117 @@
import pytest
from unittest.mock import MagicMock, patch
from django.conf import settings
@pytest.fixture
def mock_redis():
    """
    Stand-in Redis client so tests never open a real connection.

    Patches ``plane.settings.redis.redis_instance`` to hand back a
    ``MagicMock`` pre-configured for the common client operations.
    """
    fake_client = MagicMock()
    # Defaults that mimic an empty Redis: misses on reads, success on writes.
    fake_client.get.return_value = None
    fake_client.set.return_value = True
    fake_client.delete.return_value = True
    fake_client.exists.return_value = 0
    fake_client.ttl.return_value = -1
    with patch('plane.settings.redis.redis_instance', return_value=fake_client):
        yield fake_client
@pytest.fixture
def mock_elasticsearch():
    """
    Stand-in Elasticsearch client so tests never open a real connection.

    Patches ``elasticsearch.Elasticsearch`` to hand back a ``MagicMock``
    pre-configured for the common index/search/document operations.
    """
    fake_es = MagicMock()
    # Index management responds as if everything already exists / succeeds.
    fake_es.indices.exists.return_value = True
    fake_es.indices.create.return_value = {"acknowledged": True}
    # Searches come back empty; document ops report success on a fixed id.
    fake_es.search.return_value = {"hits": {"total": {"value": 0}, "hits": []}}
    fake_es.index.return_value = {"_id": "test_id", "result": "created"}
    fake_es.update.return_value = {"_id": "test_id", "result": "updated"}
    fake_es.delete.return_value = {"_id": "test_id", "result": "deleted"}
    with patch('elasticsearch.Elasticsearch', return_value=fake_es):
        yield fake_es
@pytest.fixture
def mock_mongodb():
    """
    Mock MongoDB for testing without actual MongoDB connection.
    This fixture patches PyMongo to return a MagicMock that behaves like a MongoDB client.
    """
    # Create mock MongoDB clients and collections
    mock_mongo_client = MagicMock()
    mock_mongo_db = MagicMock()
    mock_mongo_collection = MagicMock()
    # Set up the chain: client -> database -> collection, so both
    # ``client["db"]`` and ``client.get_database(...)`` reach the same mock,
    # and ``db["collection"]`` reaches the mock collection.
    mock_mongo_client.__getitem__.return_value = mock_mongo_db
    mock_mongo_client.get_database.return_value = mock_mongo_db
    mock_mongo_db.__getitem__.return_value = mock_mongo_collection
    # Configure common MongoDB collection operations.
    # Reads behave like an empty collection by default.
    mock_mongo_collection.find_one.return_value = None
    mock_mongo_collection.find.return_value = MagicMock(
        __iter__=lambda x: iter([]),
        count=lambda: 0
    )
    # Writes report acknowledged success with fixed ids/counts.
    mock_mongo_collection.insert_one.return_value = MagicMock(
        inserted_id="mock_id_123",
        acknowledged=True
    )
    mock_mongo_collection.insert_many.return_value = MagicMock(
        inserted_ids=["mock_id_123", "mock_id_456"],
        acknowledged=True
    )
    mock_mongo_collection.update_one.return_value = MagicMock(
        modified_count=1,
        matched_count=1,
        acknowledged=True
    )
    mock_mongo_collection.update_many.return_value = MagicMock(
        modified_count=2,
        matched_count=2,
        acknowledged=True
    )
    mock_mongo_collection.delete_one.return_value = MagicMock(
        deleted_count=1,
        acknowledged=True
    )
    mock_mongo_collection.delete_many.return_value = MagicMock(
        deleted_count=2,
        acknowledged=True
    )
    mock_mongo_collection.count_documents.return_value = 0
    # Start the patch; yields the top-level client so tests can assert calls.
    with patch('pymongo.MongoClient', return_value=mock_mongo_client):
        yield mock_mongo_client
@pytest.fixture
def mock_celery():
    """
    Prevent Celery tasks from actually executing during tests.

    Patches ``Task.delay`` globally so any ``some_task.delay(...)`` call
    returns a stub async-result instead of enqueueing work.
    """
    with patch('celery.app.task.Task.delay') as delay_stub:
        delay_stub.return_value = MagicMock(id="mock-task-id")
        yield delay_stub

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,459 @@
import json
import uuid
import pytest
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from django.test import Client
from django.core.exceptions import ValidationError
from unittest.mock import patch, MagicMock
from plane.db.models import User
from plane.settings.redis import redis_instance
from plane.license.models import Instance
@pytest.fixture
def setup_instance(db):
    """Create (or refresh) the license ``Instance`` row that auth flows require."""
    # Reuse the existing row's primary key if one is present; otherwise mint one.
    existing = Instance.objects.first()
    instance_pk = existing.id if existing is not None else uuid.uuid4()
    # Create or update instance with all required fields
    instance, _created = Instance.objects.update_or_create(
        id=instance_pk,
        defaults={
            "instance_name": "Test Instance",
            "instance_id": str(uuid.uuid4()),
            "current_version": "1.0.0",
            "domain": "http://localhost:8000",
            "last_checked_at": timezone.now(),
            "is_setup_done": True,
        },
    )
    return instance
@pytest.fixture
def django_client():
    """Django test client carrying a browser-like User-Agent for redirect handling."""
    user_agent = (
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1"
    )
    return Client(HTTP_USER_AGENT=user_agent)
@pytest.mark.contract
class TestMagicLinkGenerate:
    """Test magic link generation functionality (the `magic-generate` endpoint)."""

    @pytest.fixture
    def setup_user(self, db):
        """Create a test user for magic link tests"""
        user = User.objects.create(email="user@plane.so")
        user.set_password("user@123")
        user.save()
        return user

    @pytest.mark.django_db
    def test_without_data(self, api_client, setup_user, setup_instance):
        """Test magic link generation with empty data"""
        url = reverse("magic-generate")
        try:
            response = api_client.post(url, {}, format="json")
            assert response.status_code == status.HTTP_400_BAD_REQUEST
        except ValidationError:
            # If a ValidationError is raised directly, that's also acceptable
            # as it indicates the empty email was rejected
            assert True

    @pytest.mark.django_db
    def test_email_validity(self, api_client, setup_user, setup_instance):
        """Test magic link generation with invalid email format"""
        url = reverse("magic-generate")
        try:
            response = api_client.post(url, {"email": "useremail.com"}, format="json")
            assert response.status_code == status.HTTP_400_BAD_REQUEST
            assert "error_code" in response.data  # Check for error code in response
        except ValidationError:
            # If a ValidationError is raised directly, that's also acceptable
            # as it indicates the invalid email was rejected
            assert True

    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_magic_generate(self, mock_magic_link, api_client, setup_user, setup_instance):
        """Test successful magic link generation"""
        url = reverse("magic-generate")
        # Clear any stale magic-link state for this user so attempt counting
        # starts fresh.
        ri = redis_instance()
        ri.delete("magic_user@plane.so")
        response = api_client.post(url, {"email": "user@plane.so"}, format="json")
        assert response.status_code == status.HTTP_200_OK
        assert "key" in response.data  # Check for key in response
        # Verify the mock was called with the expected arguments
        mock_magic_link.assert_called_once()
        args = mock_magic_link.call_args[0]
        assert args[0] == "user@plane.so"  # First arg should be the email

    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_max_generate_attempt(self, mock_magic_link, api_client, setup_user, setup_instance):
        """Test exceeding maximum magic link generation attempts"""
        url = reverse("magic-generate")
        ri = redis_instance()
        ri.delete("magic_user@plane.so")
        # Exhaust the allowed attempts, then expect the next request to fail.
        for _ in range(4):
            api_client.post(url, {"email": "user@plane.so"}, format="json")
        response = api_client.post(url, {"email": "user@plane.so"}, format="json")
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "error_code" in response.data  # Check for error code in response
@pytest.mark.contract
class TestSignInEndpoint:
    """Test sign-in functionality (the password-based `sign-in` endpoint).

    The endpoint reports failures via redirects carrying an ``error_code``
    query parameter, so these tests inspect ``response.redirect_chain``
    rather than response bodies.
    """

    @pytest.fixture
    def setup_user(self, db):
        """Create a test user for authentication tests"""
        user = User.objects.create(email="user@plane.so")
        user.set_password("user@123")
        user.save()
        return user

    @pytest.mark.django_db
    def test_without_data(self, django_client, setup_user, setup_instance):
        """Test sign-in with empty data"""
        url = reverse("sign-in")
        response = django_client.post(url, {}, follow=True)
        # Check redirect contains error code
        assert "REQUIRED_EMAIL_PASSWORD_SIGN_IN" in response.redirect_chain[-1][0]

    @pytest.mark.django_db
    def test_email_validity(self, django_client, setup_user, setup_instance):
        """Test sign-in with invalid email format"""
        url = reverse("sign-in")
        response = django_client.post(
            url, {"email": "useremail.com", "password": "user@123"}, follow=True
        )
        # Check redirect contains error code
        assert "INVALID_EMAIL_SIGN_IN" in response.redirect_chain[-1][0]

    @pytest.mark.django_db
    def test_user_exists(self, django_client, setup_user, setup_instance):
        """Test sign-in with non-existent user"""
        url = reverse("sign-in")
        response = django_client.post(
            url, {"email": "user@email.so", "password": "user123"}, follow=True
        )
        # Check redirect contains error code
        assert "USER_DOES_NOT_EXIST" in response.redirect_chain[-1][0]

    @pytest.mark.django_db
    def test_password_validity(self, django_client, setup_user, setup_instance):
        """Test sign-in with incorrect password"""
        url = reverse("sign-in")
        response = django_client.post(
            url, {"email": "user@plane.so", "password": "user123"}, follow=True
        )
        # Check for the specific authentication error in the URL
        redirect_urls = [url for url, _ in response.redirect_chain]
        redirect_contents = ' '.join(redirect_urls)
        # The actual error code for invalid password is AUTHENTICATION_FAILED_SIGN_IN
        assert "AUTHENTICATION_FAILED_SIGN_IN" in redirect_contents

    @pytest.mark.django_db
    def test_user_login(self, django_client, setup_user, setup_instance):
        """Test successful sign-in"""
        url = reverse("sign-in")
        # First make the request without following redirects
        response = django_client.post(
            url, {"email": "user@plane.so", "password": "user@123"}, follow=False
        )
        # Check that the initial response is a redirect (302) without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # Now follow just the first redirect to avoid 404s
        response = django_client.get(response.url, follow=False)
        # The user should be authenticated regardless of the final page
        assert "_auth_user_id" in django_client.session

    @pytest.mark.django_db
    def test_next_path_redirection(self, django_client, setup_user, setup_instance):
        """Test sign-in with next_path parameter"""
        url = reverse("sign-in")
        next_path = "workspaces"
        # First make the request without following redirects
        response = django_client.post(
            url,
            {"email": "user@plane.so", "password": "user@123", "next_path": next_path},
            follow=False
        )
        # Check that the initial response is a redirect (302) without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # In a real browser, the next_path would be used to build the absolute URL
        # Since we're just testing the authentication logic, we won't check for the exact URL structure
        # Instead, just verify that we're authenticated
        assert "_auth_user_id" in django_client.session
@pytest.mark.contract
class TestMagicSignIn:
    """Test magic link (one-time code) sign-in functionality.

    Flow under test: a code is generated via the "magic-generate" endpoint
    and stored in Redis under the key ``magic_<email>`` as a JSON payload
    containing a ``token``; the code is then exchanged for a session via
    the "magic-sign-in" endpoint. Failures are reported as redirects whose
    URL carries an error code.
    """
    @pytest.fixture
    def setup_user(self, db):
        """Create a test user for magic sign-in tests"""
        user = User.objects.create(email="user@plane.so")
        user.set_password("user@123")
        user.save()
        return user
    @pytest.mark.django_db
    def test_without_data(self, django_client, setup_user, setup_instance):
        """Test magic link sign-in with empty data"""
        url = reverse("magic-sign-in")
        response = django_client.post(url, {}, follow=True)
        # The final redirect in the chain must carry the missing-fields error code
        assert "MAGIC_SIGN_IN_EMAIL_CODE_REQUIRED" in response.redirect_chain[-1][0]
    @pytest.mark.django_db
    def test_expired_invalid_magic_link(self, django_client, setup_user, setup_instance):
        """Test magic link sign-in with expired/invalid link"""
        # Deleting the Redis key simulates a code that was never issued or
        # has already expired.
        ri = redis_instance()
        ri.delete("magic_user@plane.so")
        url = reverse("magic-sign-in")
        response = django_client.post(
            url,
            {"email": "user@plane.so", "code": "xxxx-xxxxx-xxxx"},
            follow=False
        )
        # Check that we get a redirect
        assert response.status_code == 302
        # The actual error code is EXPIRED_MAGIC_CODE_SIGN_IN (when key doesn't exist)
        # or INVALID_MAGIC_CODE_SIGN_IN (when key exists but code doesn't match)
        assert "EXPIRED_MAGIC_CODE_SIGN_IN" in response.url or "INVALID_MAGIC_CODE_SIGN_IN" in response.url
    @pytest.mark.django_db
    def test_user_does_not_exist(self, django_client, setup_instance):
        """Test magic sign-in with non-existent user"""
        # Note: the setup_user fixture is intentionally omitted — the user
        # must not exist for this scenario.
        url = reverse("magic-sign-in")
        response = django_client.post(
            url,
            {"email": "nonexistent@plane.so", "code": "xxxx-xxxxx-xxxx"},
            follow=True
        )
        # Check redirect contains error code
        assert "USER_DOES_NOT_EXIST" in response.redirect_chain[-1][0]
    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_magic_code_sign_in(self, mock_magic_link, django_client, api_client, setup_user, setup_instance):
        """Test successful magic link sign-in process"""
        # First generate a magic link token
        gen_url = reverse("magic-generate")
        response = api_client.post(gen_url, {"email": "user@plane.so"}, format="json")
        # Check that the token generation was successful
        assert response.status_code == status.HTTP_200_OK
        # Since we're mocking the magic_link email task, we need to manually
        # read the token from Redis instead of from an email
        ri = redis_instance()
        user_data = json.loads(ri.get("magic_user@plane.so"))
        token = user_data["token"]
        # Use Django client to test the redirect flow without following redirects
        url = reverse("magic-sign-in")
        response = django_client.post(
            url,
            {"email": "user@plane.so", "code": token},
            follow=False
        )
        # Check that the initial response is a redirect without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # The user should now be authenticated
        assert "_auth_user_id" in django_client.session
    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_magic_sign_in_with_next_path(self, mock_magic_link, django_client, api_client, setup_user, setup_instance):
        """Test magic sign-in with next_path parameter"""
        # First generate a magic link token
        gen_url = reverse("magic-generate")
        response = api_client.post(gen_url, {"email": "user@plane.so"}, format="json")
        # Check that the token generation was successful
        assert response.status_code == status.HTTP_200_OK
        # Since we're mocking the magic_link task, we need to manually get the token from Redis
        ri = redis_instance()
        user_data = json.loads(ri.get("magic_user@plane.so"))
        token = user_data["token"]
        # Use Django client to test the redirect flow without following redirects
        url = reverse("magic-sign-in")
        next_path = "workspaces"
        response = django_client.post(
            url,
            {"email": "user@plane.so", "code": token, "next_path": next_path},
            follow=False
        )
        # Check that the initial response is a redirect without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # Check that the redirect URL contains the next_path
        assert next_path in response.url
        # The user should now be authenticated
        assert "_auth_user_id" in django_client.session
@pytest.mark.contract
class TestMagicSignUp:
    """Test magic link sign-up functionality.

    Mirrors the magic sign-in flow, but for account creation: the one-time
    code is generated via "magic-generate", stored in Redis under
    ``magic_<email>``, and exchanged at the "magic-sign-up" endpoint, which
    creates the user and opens a session.
    """
    @pytest.mark.django_db
    def test_without_data(self, django_client, setup_instance):
        """Test magic link sign-up with empty data"""
        url = reverse("magic-sign-up")
        response = django_client.post(url, {}, follow=True)
        # The final redirect in the chain must carry the missing-fields error code
        assert "MAGIC_SIGN_UP_EMAIL_CODE_REQUIRED" in response.redirect_chain[-1][0]
    @pytest.mark.django_db
    def test_user_already_exists(self, django_client, db, setup_instance):
        """Test magic sign-up with existing user"""
        # Create a user that already exists
        User.objects.create(email="existing@plane.so")
        url = reverse("magic-sign-up")
        response = django_client.post(
            url,
            {"email": "existing@plane.so", "code": "xxxx-xxxxx-xxxx"},
            follow=True
        )
        # Check redirect contains error code
        assert "USER_ALREADY_EXIST" in response.redirect_chain[-1][0]
    @pytest.mark.django_db
    def test_expired_invalid_magic_link(self, django_client, setup_instance):
        """Test magic link sign-up with expired/invalid link"""
        # No code was ever generated for this email, so the Redis key is absent.
        url = reverse("magic-sign-up")
        response = django_client.post(
            url,
            {"email": "new@plane.so", "code": "xxxx-xxxxx-xxxx"},
            follow=False
        )
        # Check that we get a redirect
        assert response.status_code == 302
        # The actual error code is EXPIRED_MAGIC_CODE_SIGN_UP (when key doesn't exist)
        # or INVALID_MAGIC_CODE_SIGN_UP (when key exists but code doesn't match)
        assert "EXPIRED_MAGIC_CODE_SIGN_UP" in response.url or "INVALID_MAGIC_CODE_SIGN_UP" in response.url
    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_magic_code_sign_up(self, mock_magic_link, django_client, api_client, setup_instance):
        """Test successful magic link sign-up process"""
        email = "newuser@plane.so"
        # First generate a magic link token
        gen_url = reverse("magic-generate")
        response = api_client.post(gen_url, {"email": email}, format="json")
        # Check that the token generation was successful
        assert response.status_code == status.HTTP_200_OK
        # Since we're mocking the magic_link email task, we need to manually
        # read the token from Redis instead of from an email
        ri = redis_instance()
        user_data = json.loads(ri.get(f"magic_{email}"))
        token = user_data["token"]
        # Use Django client to test the redirect flow without following redirects
        url = reverse("magic-sign-up")
        response = django_client.post(
            url,
            {"email": email, "code": token},
            follow=False
        )
        # Check that the initial response is a redirect without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # Check if user was created
        assert User.objects.filter(email=email).exists()
        # Check if user is authenticated
        assert "_auth_user_id" in django_client.session
    @pytest.mark.django_db
    @patch("plane.bgtasks.magic_link_code_task.magic_link.delay")
    def test_magic_sign_up_with_next_path(self, mock_magic_link, django_client, api_client, setup_instance):
        """Test magic sign-up with next_path parameter"""
        email = "newuser2@plane.so"
        # First generate a magic link token
        gen_url = reverse("magic-generate")
        response = api_client.post(gen_url, {"email": email}, format="json")
        # Check that the token generation was successful
        assert response.status_code == status.HTTP_200_OK
        # Since we're mocking the magic_link task, we need to manually get the token from Redis
        ri = redis_instance()
        user_data = json.loads(ri.get(f"magic_{email}"))
        token = user_data["token"]
        # Use Django client to test the redirect flow without following redirects
        url = reverse("magic-sign-up")
        next_path = "onboarding"
        response = django_client.post(
            url,
            {"email": email, "code": token, "next_path": next_path},
            follow=False
        )
        # Check that the initial response is a redirect without error code
        assert response.status_code == 302
        assert "error_code" not in response.url
        # In a real browser, the next_path would be used to build the absolute URL;
        # since we're just testing the authentication logic, we don't check the
        # exact URL structure here.
        # Check if user was created
        assert User.objects.filter(email=email).exists()
        # Check if user is authenticated
        assert "_auth_user_id" in django_client.session

View File

@@ -0,0 +1,79 @@
import pytest
from django.urls import reverse
from rest_framework import status
from unittest.mock import patch
from plane.db.models import Workspace, WorkspaceMember
@pytest.mark.contract
class TestWorkspaceAPI:
    """Contract tests covering workspace creation scenarios."""

    @pytest.mark.django_db
    def test_create_workspace_empty_data(self, session_client):
        """Posting an empty body is rejected with 400."""
        endpoint = reverse("workspace")
        resp = session_client.post(endpoint, {}, format="json")
        assert resp.status_code == status.HTTP_400_BAD_REQUEST

    @pytest.mark.django_db
    @patch("plane.bgtasks.workspace_seed_task.workspace_seed.delay")
    def test_create_workspace_valid_data(self, mock_workspace_seed, session_client, create_user):
        """A valid payload creates the workspace, its owner membership, and seeds it."""
        endpoint = reverse("workspace")
        owner = create_user  # the create_user fixture returns a user object
        payload = {
            "name": "Plane",
            "slug": "pla-ne-test",
            "company_name": "Plane Inc."
        }
        resp = session_client.post(endpoint, payload, format="json")
        assert resp.status_code == status.HTTP_201_CREATED
        # Exactly one workspace and one membership should now exist.
        assert Workspace.objects.count() == 1
        assert WorkspaceMember.objects.count() == 1
        # The creator becomes the owner and an admin (role 20) member.
        created = Workspace.objects.get(slug=payload["slug"])
        membership = WorkspaceMember.objects.filter(
            workspace=created, member=owner
        ).first()
        assert created.owner == owner
        assert membership.role == 20
        # Seeding is kicked off asynchronously for the new workspace.
        mock_workspace_seed.assert_called_once_with(resp.data["id"])

    @pytest.mark.django_db
    @patch('plane.bgtasks.workspace_seed_task.workspace_seed.delay')
    def test_create_duplicate_workspace(self, mock_workspace_seed, session_client):
        """Reusing a slug yields 400 with a slug-specific error."""
        endpoint = reverse("workspace")
        payload = {"name": "Plane", "slug": "pla-ne"}
        # First creation should succeed; the second reuses the slug.
        session_client.post(endpoint, payload, format="json")
        resp = session_client.post(endpoint, payload, format="json")
        # The API reports duplicate slugs as 400 BAD REQUEST, not 409 CONFLICT.
        assert resp.status_code == status.HTTP_400_BAD_REQUEST
        # The error payload identifies the offending field.
        assert "slug" in resp.data

View File

@@ -0,0 +1,82 @@
import factory
from uuid import uuid4
from django.utils import timezone
from plane.db.models import (
User,
Workspace,
WorkspaceMember,
Project,
ProjectMember
)
class UserFactory(factory.django.DjangoModelFactory):
    """Factory for creating User instances"""
    class Meta:
        model = User
        # Reuse an existing user with the same email instead of violating
        # the unique constraint.
        django_get_or_create = ('email',)
    # Fresh primary key per instance.
    id = factory.LazyFunction(uuid4)
    # Sequential unique emails (also the get-or-create lookup key above).
    email = factory.Sequence(lambda n: f'user{n}@plane.so')
    # Hash a known default password via User.set_password after creation.
    password = factory.PostGenerationMethodCall('set_password', 'password')
    first_name = factory.Sequence(lambda n: f'First{n}')
    last_name = factory.Sequence(lambda n: f'Last{n}')
    is_active = True
    is_superuser = False
    is_staff = False
class WorkspaceFactory(factory.django.DjangoModelFactory):
    """Factory for creating Workspace instances"""
    class Meta:
        model = Workspace
        # Reuse an existing workspace with the same slug instead of
        # violating the unique constraint.
        django_get_or_create = ('slug',)
    id = factory.LazyFunction(uuid4)
    name = factory.Sequence(lambda n: f'Workspace {n}')
    slug = factory.Sequence(lambda n: f'workspace-{n}')
    # Create an owning user on demand unless one is passed in.
    owner = factory.SubFactory(UserFactory)
    # NOTE(review): if these model fields use auto_now/auto_now_add, Django
    # overrides the values set here — confirm against the Workspace model.
    created_at = factory.LazyFunction(timezone.now)
    updated_at = factory.LazyFunction(timezone.now)
class WorkspaceMemberFactory(factory.django.DjangoModelFactory):
    """Factory for creating WorkspaceMember instances"""
    class Meta:
        model = WorkspaceMember
    id = factory.LazyFunction(uuid4)
    # Each membership gets its own workspace and user unless overridden.
    workspace = factory.SubFactory(WorkspaceFactory)
    member = factory.SubFactory(UserFactory)
    role = 20  # Admin role by default
    created_at = factory.LazyFunction(timezone.now)
    updated_at = factory.LazyFunction(timezone.now)
class ProjectFactory(factory.django.DjangoModelFactory):
    """Factory for creating Project instances"""
    class Meta:
        model = Project
        # Reuse a project when both name and workspace already match.
        django_get_or_create = ('name', 'workspace')
    id = factory.LazyFunction(uuid4)
    name = factory.Sequence(lambda n: f'Project {n}')
    workspace = factory.SubFactory(WorkspaceFactory)
    # Attribute creation/updates to the owning workspace's owner.
    created_by = factory.SelfAttribute('workspace.owner')
    updated_by = factory.SelfAttribute('workspace.owner')
    created_at = factory.LazyFunction(timezone.now)
    updated_at = factory.LazyFunction(timezone.now)
class ProjectMemberFactory(factory.django.DjangoModelFactory):
    """Factory for creating ProjectMember instances"""
    class Meta:
        model = ProjectMember
    id = factory.LazyFunction(uuid4)
    # Each membership gets its own project and user unless overridden.
    project = factory.SubFactory(ProjectFactory)
    member = factory.SubFactory(UserFactory)
    role = 20  # Admin role by default
    created_at = factory.LazyFunction(timezone.now)
    updated_at = factory.LazyFunction(timezone.now)

View File

View File

@@ -0,0 +1,100 @@
import pytest
import requests
from django.urls import reverse
@pytest.mark.smoke
class TestAuthSmoke:
    """Smoke tests for authentication endpoints.

    These run against a live server (the ``plane_server`` fixture) over
    plain HTTP with ``requests``, so assertions are deliberately tolerant
    of implementation details (JSON-based vs redirect-based auth).
    """
    @pytest.mark.django_db
    def test_login_endpoint_available(self, plane_server, create_user, user_data):
        """Test that the login endpoint is available and responds correctly"""
        # Build the absolute sign-in URL against the running server
        relative_url = reverse("sign-in")
        url = f"{plane_server.url}{relative_url}"
        # 1. Test bad login - test with wrong password
        response = requests.post(
            url,
            data={
                "email": user_data["email"],
                "password": "wrong-password"
            }
        )
        # For bad credentials, any of these status codes would be valid
        # The test shouldn't be brittle to minor implementation changes
        assert response.status_code != 500, "Authentication should not cause server errors"
        assert response.status_code != 404, "Authentication endpoint should exist"
        if response.status_code == 200:
            # If API returns 200 for failures, check the response body for error indication
            if hasattr(response, 'json'):
                try:
                    data = response.json()
                    # JSON response might indicate error in its structure
                    assert "error" in data or "error_code" in data or "detail" in data or response.url.endswith("sign-in"), \
                        "Error response should contain error details"
                except ValueError:
                    # It's ok if response isn't JSON format
                    pass
        elif response.status_code in [302, 303]:
            # If it's a redirect, it should redirect to a login page or error page
            redirect_url = response.headers.get('Location', '')
            assert "error" in redirect_url or "sign-in" in redirect_url, \
                "Failed login should redirect to login page or error page"
        # 2. Test good login with correct credentials
        response = requests.post(
            url,
            data={
                "email": user_data["email"],
                "password": user_data["password"]
            },
            allow_redirects=False  # Don't follow redirects
        )
        # Successful auth should not be a client error or server error
        assert response.status_code not in range(400, 600), \
            f"Authentication with valid credentials failed with status {response.status_code}"
        # Specific validation based on response type
        if response.status_code in [302, 303]:
            # Redirect-based auth: check that redirect URL doesn't contain error
            redirect_url = response.headers.get('Location', '')
            assert "error" not in redirect_url and "error_code" not in redirect_url, \
                "Successful login redirect should not contain error parameters"
        elif response.status_code == 200:
            # API token-based auth: check for tokens or user session
            if hasattr(response, 'json'):
                try:
                    data = response.json()
                    # If it's a token response
                    if "access_token" in data:
                        assert "refresh_token" in data, "JWT auth should return both access and refresh tokens"
                    # If it's a user session response
                    elif "user" in data:
                        assert "is_authenticated" in data and data["is_authenticated"], \
                            "User session response should indicate authentication"
                    # Otherwise it should at least indicate success
                    else:
                        assert not any(error_key in data for error_key in ["error", "error_code", "detail"]), \
                            "Success response should not contain error keys"
                except ValueError:
                    # Non-JSON is acceptable if it's a redirect or HTML response
                    pass
@pytest.mark.smoke
class TestHealthCheckSmoke:
    """Smoke check: the service root must answer while the server is up."""

    def test_healthcheck_endpoint(self, plane_server):
        """The root endpoint of the running server should respond with HTTP 200."""
        root_url = f"{plane_server.url}/"
        resp = requests.get(root_url)
        assert resp.status_code == 200, "Health check endpoint should return 200 OK"

View File

View File

@@ -0,0 +1,50 @@
import pytest
from uuid import uuid4
from plane.db.models import Workspace, WorkspaceMember, User
@pytest.mark.unit
class TestWorkspaceModel:
    """Unit tests for the Workspace and WorkspaceMember models."""

    @staticmethod
    def _make_workspace(owner):
        """Create a throwaway workspace owned by the given user."""
        return Workspace.objects.create(
            name="Test Workspace",
            slug="test-workspace",
            id=uuid4(),
            owner=owner,
        )

    @pytest.mark.django_db
    def test_workspace_creation(self, create_user):
        """A workspace persists with exactly the fields it was created with."""
        workspace = self._make_workspace(create_user)
        assert workspace.id is not None
        assert workspace.name == "Test Workspace"
        assert workspace.slug == "test-workspace"
        assert workspace.owner == create_user

    @pytest.mark.django_db
    def test_workspace_member_creation(self, create_user):
        """A membership links a user to a workspace with the given role."""
        workspace = self._make_workspace(create_user)
        membership = WorkspaceMember.objects.create(
            workspace=workspace,
            member=create_user,
            role=20,  # admin
        )
        assert membership.id is not None
        assert membership.workspace == workspace
        assert membership.member == create_user
        assert membership.role == 20

View File

@@ -0,0 +1,71 @@
import pytest
from uuid import uuid4
from plane.api.serializers import WorkspaceLiteSerializer
from plane.db.models import Workspace, User
@pytest.mark.unit
class TestWorkspaceLiteSerializer:
    """Test the WorkspaceLiteSerializer (id, name, slug — all read-only)."""
    def test_workspace_lite_serializer_fields(self, db):
        """Test that the serializer includes the correct fields"""
        # Create a user to be the owner
        owner = User.objects.create(
            email="test@example.com",
            first_name="Test",
            last_name="User"
        )
        # Create a workspace with explicit ID to test serialization
        workspace_id = uuid4()
        workspace = Workspace.objects.create(
            name="Test Workspace",
            slug="test-workspace",
            id=workspace_id,
            owner=owner
        )
        # Serialize the workspace
        serialized_data = WorkspaceLiteSerializer(workspace).data
        # Check fields are present and correct
        assert "name" in serialized_data
        assert "slug" in serialized_data
        assert "id" in serialized_data
        assert serialized_data["name"] == "Test Workspace"
        assert serialized_data["slug"] == "test-workspace"
        # Compare as strings: the serializer may emit the id as UUID or str.
        assert str(serialized_data["id"]) == str(workspace_id)
    def test_workspace_lite_serializer_read_only(self, db):
        """Test that the serializer fields are read-only"""
        # Create a user to be the owner
        owner = User.objects.create(
            email="test2@example.com",
            first_name="Test",
            last_name="User"
        )
        # Create a workspace
        workspace = Workspace.objects.create(
            name="Test Workspace",
            slug="test-workspace",
            id=uuid4(),
            owner=owner
        )
        # Try to update via serializer
        serializer = WorkspaceLiteSerializer(
            workspace,
            data={"name": "Updated Name", "slug": "updated-slug"}
        )
        # Serializer should be valid (read-only fields are ignored on input)
        assert serializer.is_valid()
        # Save should not update the read-only fields — the instance keeps
        # its original values.
        updated_workspace = serializer.save()
        assert updated_workspace.name == "Test Workspace"
        assert updated_workspace.slug == "test-workspace"

View File

@@ -0,0 +1,49 @@
import uuid
import pytest
from plane.utils.uuid import is_valid_uuid, convert_uuid_to_integer
@pytest.mark.unit
class TestUUIDUtils:
    """Test the UUID utilities"""
    def test_is_valid_uuid_with_valid_uuid(self):
        """Test is_valid_uuid with a valid UUID"""
        # Generate a valid (version 4) UUID
        valid_uuid = str(uuid.uuid4())
        assert is_valid_uuid(valid_uuid) is True
    def test_is_valid_uuid_with_invalid_uuid(self):
        """Test is_valid_uuid with invalid UUID strings"""
        # Test with different invalid formats
        assert is_valid_uuid("not-a-uuid") is False
        assert is_valid_uuid("123456789") is False
        assert is_valid_uuid("") is False
        # The nil UUID parses as a UUID, yet is_valid_uuid rejects it —
        # presumably the helper only accepts version-4 UUIDs; confirm
        # against plane.utils.uuid.is_valid_uuid's implementation.
        assert is_valid_uuid("00000000-0000-0000-0000-000000000000") is False
    def test_convert_uuid_to_integer(self):
        """Test convert_uuid_to_integer function"""
        # Create a known UUID
        test_uuid = uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")
        # Convert to integer
        result = convert_uuid_to_integer(test_uuid)
        # Check that the result is an integer
        assert isinstance(result, int)
        # Ensure consistent results with the same input (the conversion is
        # expected to be deterministic)
        assert convert_uuid_to_integer(test_uuid) == result
        # Different UUIDs should produce different integers
        different_uuid = uuid.UUID("550e8400-e29b-41d4-a716-446655440000")
        assert convert_uuid_to_integer(different_uuid) != result
    def test_convert_uuid_to_integer_string_input(self):
        """Test convert_uuid_to_integer handles string UUID"""
        # Test with a UUID string
        test_uuid_str = "f47ac10b-58cc-4372-a567-0e02b2c3d479"
        test_uuid = uuid.UUID(test_uuid_str)
        # Should get the same result whether passing UUID or string
        assert convert_uuid_to_integer(test_uuid) == convert_uuid_to_integer(test_uuid_str)

17
apiserver/pytest.ini Normal file
View File

@@ -0,0 +1,17 @@
[pytest]
DJANGO_SETTINGS_MODULE = plane.settings.test
python_files = test_*.py
python_classes = Test*
python_functions = test_*
markers =
unit: Unit tests for models, serializers, and utility functions
contract: Contract tests for API endpoints
smoke: Smoke tests for critical functionality
slow: Tests that are slow and might be skipped in some contexts
addopts =
--strict-markers
--reuse-db
--nomigrations
-vs

View File

@@ -1,4 +1,12 @@
-r base.txt
# test checker
pytest==7.1.2
coverage==6.5.0
# test framework
pytest==7.4.0
pytest-django==4.5.2
pytest-cov==4.1.0
pytest-xdist==3.3.1
pytest-mock==3.11.1
factory-boy==3.3.0
freezegun==1.2.2
coverage==7.2.7
httpx==0.24.1
requests==2.32.2

91
apiserver/run_tests.py Executable file
View File

@@ -0,0 +1,91 @@
#!/usr/bin/env python
import argparse
import subprocess
import sys
def build_command(args):
    """Translate parsed CLI options into the pytest command to execute.

    Kept separate from main() so command construction is testable without
    actually spawning pytest.

    Args:
        args: argparse.Namespace with boolean attributes ``unit``,
            ``contract``, ``smoke``, ``coverage``, ``parallel``, ``verbose``.

    Returns:
        list[str]: the argv list to pass to subprocess.run.
    """
    # Use the interpreter running this script rather than whatever "python"
    # resolves to on PATH, so tests run inside the same virtualenv.
    cmd = [sys.executable, "-m", "pytest"]
    # Collect the requested pytest markers (-m "unit or contract or smoke").
    markers = []
    if args.unit:
        markers.append("unit")
    if args.contract:
        markers.append("contract")
    if args.smoke:
        markers.append("smoke")
    if markers:
        cmd.extend(["-m", " or ".join(markers)])
    # Coverage reporting: terminal summary plus HTML report.
    if args.coverage:
        cmd.extend(["--cov=plane", "--cov-report=term", "--cov-report=html"])
    # Parallel execution via pytest-xdist.
    if args.parallel:
        cmd.extend(["-n", "auto"])
    if args.verbose:
        cmd.append("-v")
    # Common flags: keep the test DB between runs and skip migrations.
    cmd.extend(["--reuse-db", "--nomigrations"])
    return cmd


def main():
    """Parse CLI arguments, run pytest, and optionally enforce coverage.

    Exits with pytest's return code, or with coverage's return code if the
    coverage report falls below the 90% threshold.
    """
    parser = argparse.ArgumentParser(description="Run Plane tests")
    parser.add_argument(
        "-u", "--unit",
        action="store_true",
        help="Run unit tests only"
    )
    parser.add_argument(
        "-c", "--contract",
        action="store_true",
        help="Run contract tests only"
    )
    parser.add_argument(
        "-s", "--smoke",
        action="store_true",
        help="Run smoke tests only"
    )
    parser.add_argument(
        "-o", "--coverage",
        action="store_true",
        help="Generate coverage report"
    )
    parser.add_argument(
        "-p", "--parallel",
        action="store_true",
        help="Run tests in parallel"
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Verbose output"
    )
    args = parser.parse_args()
    cmd = build_command(args)
    # Echo the exact invocation for easier debugging of CI logs.
    print(f"Running: {' '.join(cmd)}")
    result = subprocess.run(cmd)
    # Check coverage thresholds if coverage is enabled
    if args.coverage:
        print("Checking coverage thresholds...")
        # Run coverage with the same interpreter as the tests.
        coverage_cmd = [sys.executable, "-m", "coverage", "report", "--fail-under=90"]
        coverage_result = subprocess.run(coverage_cmd)
        if coverage_result.returncode != 0:
            print("Coverage below threshold (90%)")
            sys.exit(coverage_result.returncode)
    sys.exit(result.returncode)


if __name__ == "__main__":
    main()

4
apiserver/run_tests.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Simple wrapper that delegates to the main test runner in the tests directory.
# Resolve the runner relative to this script's own location (not the caller's
# current working directory), so the wrapper works when invoked from anywhere.
exec "$(dirname "$0")/tests/run_tests.sh" "$@"

View File

@@ -209,7 +209,7 @@
{% for actor_comment in comment.actor_comments.new_value %}
<tr>
<td>
<div style=" padding: 6px 10px; margin-left: 10px; background-color: white; font-size: 0.8rem; color: #525252; margin-top: 5px; border-radius: 4px; display: flex; align-items: center; " >
<div style=" padding: 6px 10px; margin-left: 10px; background-color: white; font-size: 0.8rem; color: #525252; margin-top: 5px; border-radius: 4px; overflow-x: scroll; max-width: 430px;" >
<p> {{ actor_comment|safe }} </p>
</div>
</td>

View File

@@ -1 +1,14 @@
// Maximum upload size for user-provided images, in bytes.
export const MAX_FILE_SIZE = 5 * 1024 * 1024; // 5MB
// Accepted MIME types for the avatar-image dropzone, in react-dropzone's
// `accept` format: a map of MIME type -> allowed file extensions (an empty
// array means any extension for that MIME type).
export const ACCEPTED_AVATAR_IMAGE_MIME_TYPES_FOR_REACT_DROPZONE = {
  "image/jpeg": [],
  "image/jpg": [],
  "image/png": [],
  "image/webp": [],
};
// Accepted MIME types for the cover-image dropzone. Currently identical to
// the avatar list, but kept as a separate constant so the two can diverge.
export const ACCEPTED_COVER_IMAGE_MIME_TYPES_FOR_REACT_DROPZONE = {
  "image/jpeg": [],
  "image/jpg": [],
  "image/png": [],
  "image/webp": [],
};

View File

@@ -355,7 +355,7 @@ export const ISSUE_DISPLAY_FILTERS_BY_PAGE: TIssueFiltersToDisplayByPageType = {
sub_work_items: {
list: {
display_properties: SUB_ISSUES_DISPLAY_PROPERTIES_KEYS,
filters: ["priority", "state", "project", "issue_type", "assignees", "start_date", "target_date"],
filters: ["priority", "state", "issue_type", "assignees", "start_date", "target_date"],
display_filters: {
order_by: ["-created_at", "-updated_at", "start_date", "-priority"],
group_by: ["state", "priority", "assignees", null],

View File

@@ -5,7 +5,7 @@ const project = resolve(process.cwd(), "tsconfig.json");
/** @type {import("eslint").Linter.Config} */
module.exports = {
extends: ["prettier", "plugin:@typescript-eslint/recommended"],
plugins: ["react", "@typescript-eslint", "import"],
plugins: ["react", "react-hooks", "@typescript-eslint", "import"],
globals: {
React: true,
JSX: true,
@@ -38,7 +38,7 @@ module.exports = {
"react/self-closing-comp": ["error", { component: true, html: true }],
"react/jsx-boolean-value": "error",
"react/jsx-no-duplicate-props": "error",
// "react-hooks/exhaustive-deps": "warn",
"react-hooks/exhaustive-deps": "warn",
"@typescript-eslint/no-unused-expressions": "warn",
"@typescript-eslint/no-unused-vars": ["warn"],
"@typescript-eslint/no-explicit-any": "warn",

View File

@@ -17,6 +17,7 @@
"eslint-config-turbo": "^1.12.4",
"eslint-plugin-import": "^2.29.1",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^5.2.0",
"typescript": "5.3.3"
}
}

View File

@@ -746,7 +746,8 @@
"message": "Něco se pokazilo. Zkuste to prosím znovu."
},
"required": "Toto pole je povinné",
"entity_required": "{entity} je povinná"
"entity_required": "{entity} je povinná",
"restricted_entity": "{entity} je omezen"
},
"update_link": "Aktualizovat odkaz",
"attach": "Připojit",
@@ -1107,6 +1108,18 @@
"remove": {
"success": "Podřízená pracovní položka úspěšně odebrána",
"error": "Chyba při odebírání podřízené položky"
},
"empty_state": {
"sub_list_filters": {
"title": "Nemáte podřízené pracovní položky, které odpovídají použitým filtrům.",
"description": "Chcete-li zobrazit všechny podřízené pracovní položky, odstraňte všechny použité filtry.",
"action": "Odstranit filtry"
},
"list_filters": {
"title": "Nemáte pracovní položky, které odpovídají použitým filtrům.",
"description": "Chcete-li zobrazit všechny pracovní položky, odstraňte všechny použité filtry.",
"action": "Odstranit filtry"
}
}
},
"view": {

View File

@@ -746,7 +746,8 @@
"message": "Etwas ist schiefgelaufen. Bitte versuchen Sie es erneut."
},
"required": "Dieses Feld ist erforderlich",
"entity_required": "{entity} ist erforderlich"
"entity_required": "{entity} ist erforderlich",
"restricted_entity": "{entity} ist eingeschränkt"
},
"update_link": "Link aktualisieren",
"attach": "Anhängen",
@@ -1107,6 +1108,18 @@
"remove": {
"success": "Untergeordnetes Arbeitselement erfolgreich entfernt",
"error": "Fehler beim Entfernen des untergeordneten Elements"
},
"empty_state": {
"sub_list_filters": {
"title": "Sie haben keine untergeordneten Arbeitselemente, die den von Ihnen angewendeten Filtern entsprechen.",
"description": "Um alle untergeordneten Arbeitselemente anzuzeigen, entfernen Sie alle angewendeten Filter.",
"action": "Filter entfernen"
},
"list_filters": {
"title": "Sie haben keine Arbeitselemente, die den von Ihnen angewendeten Filtern entsprechen.",
"description": "Um alle Arbeitselemente anzuzeigen, entfernen Sie alle angewendeten Filter.",
"action": "Filter entfernen"
}
}
},
"view": {

View File

@@ -580,7 +580,8 @@
"message": "Something went wrong. Please try again."
},
"required": "This field is required",
"entity_required": "{entity} is required"
"entity_required": "{entity} is required",
"restricted_entity": "{entity} is restricted"
},
"update_link": "Update link",
"attach": "Attach",
@@ -943,6 +944,18 @@
"remove": {
"success": "Sub-work item removed successfully",
"error": "Error removing sub-work item"
},
"empty_state": {
"sub_list_filters": {
"title": "You don't have sub-work items that match the filters you've applied.",
"description": "To see all sub-work items, clear all applied filters.",
"action": "Clear filters"
},
"list_filters": {
"title": "You don't have work items that match the filters you've applied.",
"description": "To see all work items, clear all applied filters.",
"action": "Clear filters"
}
}
},
"view": {
@@ -2283,4 +2296,4 @@
"previously_edited_by": "Previously edited by",
"edited_by": "Edited by"
}
}
}

View File

@@ -750,7 +750,8 @@
"message": "Algo salió mal. Por favor, inténtalo de nuevo."
},
"required": "Este campo es obligatorio",
"entity_required": "{entity} es obligatorio"
"entity_required": "{entity} es obligatorio",
"restricted_entity": "{entity} está restringido"
},
"update_link": "Actualizar enlace",
"attach": "Adjuntar",
@@ -1110,6 +1111,18 @@
"remove": {
"success": "Sub-elemento eliminado correctamente",
"error": "Error al eliminar el sub-elemento"
},
"empty_state": {
"sub_list_filters": {
"title": "No tienes sub-elementos de trabajo que coincidan con los filtros que has aplicado.",
"description": "Para ver todos los sub-elementos de trabajo, elimina todos los filtros aplicados.",
"action": "Eliminar filtros"
},
"list_filters": {
"title": "No tienes elementos de trabajo que coincidan con los filtros que has aplicado.",
"description": "Para ver todos los elementos de trabajo, elimina todos los filtros aplicados.",
"action": "Eliminar filtros"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Quelque chose s'est mal passé. Veuillez réessayer."
},
"required": "Ce champ est obligatoire",
"entity_required": "{entity} est requis"
"entity_required": "{entity} est requis",
"restricted_entity": "{entity} est restreint"
},
"update_link": "Mettre à jour le lien",
"attach": "Joindre",
@@ -1108,6 +1109,18 @@
"remove": {
"success": "Sous-élément de travail supprimé avec succès",
"error": "Erreur lors de la suppression du sous-élément de travail"
},
"empty_state": {
"sub_list_filters": {
"title": "Vous n'avez pas de sous-éléments de travail qui correspondent aux filtres que vous avez appliqués.",
"description": "Pour voir tous les sous-éléments de travail, effacer tous les filtres appliqués.",
"action": "Effacer les filtres"
},
"list_filters": {
"title": "Vous n'avez pas d'éléments de travail qui correspondent aux filtres que vous avez appliqués.",
"description": "Pour voir tous les éléments de travail, effacer tous les filtres appliqués.",
"action": "Effacer les filtres"
}
}
},
"view": {

View File

@@ -746,7 +746,8 @@
"message": "Sesuatu telah salah. Silakan coba lagi."
},
"required": "Bidang ini diperlukan",
"entity_required": "{entity} diperlukan"
"entity_required": "{entity} diperlukan",
"restricted_entity": "{entity} dibatasi"
},
"update_link": "Perbarui tautan",
"attach": "Lampirkan",
@@ -1107,6 +1108,18 @@
"remove": {
"success": "Sub-item kerja berhasil dihapus",
"error": "Kesalahan saat menghapus sub-item kerja"
},
"empty_state": {
"sub_list_filters": {
"title": "Anda tidak memiliki sub-item kerja yang cocok dengan filter yang Anda terapkan.",
"description": "Untuk melihat semua sub-item kerja, hapus semua filter yang diterapkan.",
"action": "Hapus filter"
},
"list_filters": {
"title": "Anda tidak memiliki item kerja yang cocok dengan filter yang Anda terapkan.",
"description": "Untuk melihat semua item kerja, hapus semua filter yang diterapkan.",
"action": "Hapus filter"
}
}
},
"view": {

View File

@@ -743,7 +743,8 @@
"message": "Qualcosa è andato storto. Per favore, riprova."
},
"required": "Questo campo è obbligatorio",
"entity_required": "{entity} è obbligatorio"
"entity_required": "{entity} è obbligatorio",
"restricted_entity": "{entity} è limitato"
},
"update_link": "Aggiorna link",
"attach": "Allega",
@@ -1106,6 +1107,18 @@
"remove": {
"success": "Sotto-elemento di lavoro rimosso con successo",
"error": "Errore nella rimozione del sotto-elemento di lavoro"
},
"empty_state": {
"sub_list_filters": {
"title": "Non hai sotto-elementi di lavoro che corrispondono ai filtri che hai applicato.",
"description": "Per vedere tutti i sotto-elementi di lavoro, cancella tutti i filtri applicati.",
"action": "Cancella filtri"
},
"list_filters": {
"title": "Non hai elementi di lavoro che corrispondono ai filtri che hai applicato.",
"description": "Per vedere tutti gli elementi di lavoro, cancella tutti i filtri applicati.",
"action": "Cancella filtri"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "問題が発生しました。もう一度お試しください。"
},
"required": "この項目は必須です",
"entity_required": "{entity}は必須です"
"entity_required": "{entity}は必須です",
"restricted_entity": "{entity} は制限されています"
},
"update_link": "リンクを更新",
"attach": "添付",
@@ -1108,6 +1109,18 @@
"remove": {
"success": "サブ作業項目を削除しました",
"error": "サブ作業項目の削除中にエラーが発生しました"
},
"empty_state": {
"sub_list_filters": {
"title": "適用されたフィルターに一致するサブ作業項目がありません。",
"description": "すべてのサブ作業項目を表示するには、すべての適用されたフィルターをクリアしてください。",
"action": "フィルターをクリア"
},
"list_filters": {
"title": "適用されたフィルターに一致する作業項目がありません。",
"description": "すべての作業項目を表示するには、すべての適用されたフィルターをクリアしてください。",
"action": "フィルターをクリア"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "문제가 발생했습니다. 다시 시도해주세요."
},
"required": "이 필드는 필수입니다",
"entity_required": "{entity}가 필요합니다"
"entity_required": "{entity}가 필요합니다",
"restricted_entity": "{entity}은(는) 제한되어 있습니다"
},
"update_link": "링크 업데이트",
"attach": "첨부",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "하위 작업 항목이 성공적으로 제거되었습니다",
"error": "하위 작업 항목 제거 중 오류 발생"
},
"empty_state": {
"sub_list_filters": {
"title": "적용된 필터에 일치하는 하위 작업 항목이 없습니다.",
"description": "모든 하위 작업 항목을 보려면 모든 적용된 필터를 지우세요.",
"action": "필터 지우기"
},
"list_filters": {
"title": "적용된 필터에 일치하는 작업 항목이 없습니다.",
"description": "모든 작업 항목을 보려면 모든 적용된 필터를 지우세요.",
"action": "필터 지우기"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Coś poszło nie tak. Spróbuj ponownie."
},
"required": "To pole jest wymagane",
"entity_required": "{entity} jest wymagane"
"entity_required": "{entity} jest wymagane",
"restricted_entity": "{entity} jest ograniczony"
},
"update_link": "Zaktualizuj link",
"attach": "Dołącz",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "Podrzędny element pracy usunięto pomyślnie",
"error": "Błąd podczas usuwania elementu podrzędnego"
},
"empty_state": {
"sub_list_filters": {
"title": "Nie masz elementów podrzędnych, które pasują do filtrów, które zastosowałeś.",
"description": "Aby zobaczyć wszystkie elementy podrzędne, wyczyść wszystkie zastosowane filtry.",
"action": "Wyczyść filtry"
},
"list_filters": {
"title": "Nie masz elementów pracy, które pasują do filtrów, które zastosowałeś.",
"description": "Aby zobaczyć wszystkie elementy pracy, wyczyść wszystkie zastosowane filtry.",
"action": "Wyczyść filtry"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Algo deu errado. Por favor, tente novamente."
},
"required": "Este campo é obrigatório",
"entity_required": "{entity} é obrigatório"
"entity_required": "{entity} é obrigatório",
"restricted_entity": "{entity} está restrito"
},
"update_link": "Atualizar link",
"attach": "Anexar",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "Sub-item de trabalho removido com sucesso",
"error": "Erro ao remover sub-item de trabalho"
},
"empty_state": {
"sub_list_filters": {
"title": "Você não tem sub-itens de trabalho que correspondem aos filtros que você aplicou.",
"description": "Para ver todos os sub-itens de trabalho, limpe todos os filtros aplicados.",
"action": "Limpar filtros"
},
"list_filters": {
"title": "Você não tem itens de trabalho que correspondem aos filtros que você aplicou.",
"description": "Para ver todos os itens de trabalho, limpe todos os filtros aplicados.",
"action": "Limpar filtros"
}
}
},
"view": {

View File

@@ -746,7 +746,8 @@
"message": "Ceva a funcționat greșit. Te rugăm să încerci din nou."
},
"required": "Acest câmp este obligatoriu",
"entity_required": "{entity} este obligatoriu"
"entity_required": "{entity} este obligatoriu",
"restricted_entity": "{entity} este restricționat"
},
"update_link": "Actualizează link-ul",
"attach": "Atașează",
@@ -1107,6 +1108,18 @@
"remove": {
"success": "Sub-activitatea a fost eliminată cu succes",
"error": "Eroare la eliminarea sub-activității"
},
"empty_state": {
"sub_list_filters": {
"title": "Nu ai sub-elemente de lucru care corespund filtrelor pe care le-ai aplicat.",
"description": "Pentru a vedea toate sub-elementele de lucru, șterge toate filtrele aplicate.",
"action": "Șterge filtrele"
},
"list_filters": {
"title": "Nu ai elemente de lucru care corespund filtrelor pe care le-ai aplicat.",
"description": "Pentru a vedea toate elementele de lucru, șterge toate filtrele aplicate.",
"action": "Șterge filtrele"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Что-то пошло не так. Попробуйте позже."
},
"required": "Это поле обязательно",
"entity_required": "{entity} обязательно"
"entity_required": "{entity} обязательно",
"restricted_entity": "{entity} ограничен"
},
"update_link": "обновить ссылку",
"attach": "Прикрепить",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "Подэлемент успешно удален",
"error": "Ошибка удаления подэлемента"
},
"empty_state": {
"sub_list_filters": {
"title": "У вас нет подэлементов, которые соответствуют примененным фильтрам.",
"description": "Чтобы увидеть все подэлементы, очистите все примененные фильтры.",
"action": "Очистить фильтры"
},
"list_filters": {
"title": "У вас нет рабочих элементов, которые соответствуют примененным фильтрам.",
"description": "Чтобы увидеть все рабочие элементы, очистите все примененные фильтры.",
"action": "Очистить фильтры"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Niečo sa pokazilo. Skúste to prosím znova."
},
"required": "Toto pole je povinné",
"entity_required": "{entity} je povinná"
"entity_required": "{entity} je povinná",
"restricted_entity": "{entity} je obmedzený"
},
"update_link": "Aktualizovať odkaz",
"attach": "Pripojiť",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "Podriadená pracovná položka bola úspešne odstránená",
"error": "Chyba pri odstraňovaní podriadenej položky"
},
"empty_state": {
"sub_list_filters": {
"title": "Nemáte podriadené pracovné položky, ktoré zodpovedajú použitým filtrom.",
"description": "Pre zobrazenie všetkých podriadených pracovných položiek vymažte všetky použité filtre.",
"action": "Vymazať filtre"
},
"list_filters": {
"title": "Nemáte pracovné položky, ktoré zodpovedajú použitým filtrom.",
"description": "Pre zobrazenie všetkých pracovných položiek vymažte všetky použité filtre.",
"action": "Vymazať filtre"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Bir hata oluştu. Lütfen tekrar deneyin."
},
"required": "Bu alan gereklidir",
"entity_required": "{entity} gereklidir"
"entity_required": "{entity} gereklidir",
"restricted_entity": "{entity} kısıtlanmıştır"
},
"update_link": "Bağlantıyı güncelle",
"attach": "Ekle",
@@ -1110,6 +1111,18 @@
"remove": {
"success": "Alt iş öğesi başarıyla kaldırıldı",
"error": "Alt iş öğesi kaldırılırken hata oluştu"
},
"empty_state": {
"sub_list_filters": {
"title": "Alt iş öğelerinizin filtreleriyle eşleşmiyor.",
"description": "Tüm alt iş öğelerini görmek için tüm uygulanan filtreleri temizleyin.",
"action": "Filtreleri temizle"
},
"list_filters": {
"title": "İş öğelerinizin filtreleriyle eşleşmiyor.",
"description": "Tüm iş öğelerini görmek için tüm uygulanan filtreleri temizleyin.",
"action": "Filtreleri temizle"
}
}
},
"view": {

View File

@@ -747,8 +747,9 @@
"title": "Помилка!",
"message": "Щось пішло не так. Будь ласка, спробуйте ще раз."
},
"required": "Це поле є обовязковим",
"entity_required": "{entity} є обовязковим"
"required": "Це поле є обов'язковим",
"entity_required": "{entity} є обов'язковим",
"restricted_entity": "{entity} обмежено"
},
"update_link": "Оновити посилання",
"attach": "Прикріпити",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "Похідну робочу одиницю успішно вилучено",
"error": "Помилка під час вилучення похідної одиниці"
},
"empty_state": {
"sub_list_filters": {
"title": "Ви не маєте похідних робочих одиниць, які відповідають застосованим фільтрам.",
"description": "Щоб побачити всі похідні робочі одиниці, очистіть всі застосовані фільтри.",
"action": "Очистити фільтри"
},
"list_filters": {
"title": "Ви не маєте робочих одиниць, які відповідають застосованим фільтрам.",
"description": "Щоб побачити всі робочі одиниці, очистіть всі застосовані фільтри.",
"action": "Очистити фільтри"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "Đã xảy ra lỗi. Vui lòng thử lại."
},
"required": "Trường này là bắt buộc",
"entity_required": "{entity} là bắt buộc"
"entity_required": "{entity} là bắt buộc",
"restricted_entity": "{entity} bị hạn chế"
},
"update_link": "Cập nhật liên kết",
"attach": "Đính kèm",
@@ -1108,6 +1109,18 @@
"remove": {
"success": "Đã xóa mục công việc con thành công",
"error": "Đã xảy ra lỗi khi xóa mục công việc con"
},
"empty_state": {
"sub_list_filters": {
"title": "Bạn không có mục công việc con nào phù hợp với các bộ lọc mà bạn đã áp dụng.",
"description": "Để xem tất cả các mục công việc con, hãy xóa tất cả các bộ lọc đã áp dụng.",
"action": "Xóa bộ lọc"
},
"list_filters": {
"title": "Bạn không có mục công việc nào phù hợp với các bộ lọc mà bạn đã áp dụng.",
"description": "Để xem tất cả các mục công việc, hãy xóa tất cả các bộ lọc đã áp dụng.",
"action": "Xóa bộ lọc"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "发生错误。请重试。"
},
"required": "此字段为必填项",
"entity_required": "{entity}为必填项"
"entity_required": "{entity}为必填项",
"restricted_entity": "{entity}已被限制"
},
"update_link": "更新链接",
"attach": "附加",
@@ -1108,6 +1109,18 @@
"remove": {
"success": "子工作项移除成功",
"error": "移除子工作项时出错"
},
"empty_state": {
"sub_list_filters": {
"title": "您没有符合您应用的过滤器的子工作项。",
"description": "要查看所有子工作项,请清除所有应用的过滤器。",
"action": "清除过滤器"
},
"list_filters": {
"title": "您没有符合您应用的过滤器的工作项。",
"description": "要查看所有工作项,请清除所有应用的过滤器。",
"action": "清除过滤器"
}
}
},
"view": {

View File

@@ -748,7 +748,8 @@
"message": "發生錯誤。請再試一次。"
},
"required": "此欄位為必填",
"entity_required": "{entity} 為必填"
"entity_required": "{entity} 為必填",
"restricted_entity": "{entity}已被限制"
},
"update_link": "更新連結",
"attach": "附加",
@@ -1109,6 +1110,18 @@
"remove": {
"success": "子工作事項移除成功",
"error": "移除子工作事項時發生錯誤"
},
"empty_state": {
"sub_list_filters": {
"title": "您沒有符合您應用過的過濾器的子工作事項。",
"description": "要查看所有子工作事項,請清除所有應用過的過濾器。",
"action": "清除過濾器"
},
"list_filters": {
"title": "您沒有符合您應用過的過濾器的工作事項。",
"description": "要查看所有工作事項,請清除所有應用過的過濾器。",
"action": "清除過濾器"
}
}
},
"view": {

View File

@@ -461,9 +461,9 @@ module.exports = {
"onboarding-gradient-200": "var( --gradient-onboarding-200)",
"onboarding-gradient-300": "var( --gradient-onboarding-300)",
},
},
fontFamily: {
custom: ["Inter", "sans-serif"],
fontFamily: {
custom: ["Inter", "sans-serif"],
},
},
},
plugins: [

View File

@@ -1 +1 @@
export type TInstanceAIConfigurationKeys = "OPENAI_API_KEY" | "GPT_ENGINE";
export type TInstanceAIConfigurationKeys = "LLM_API_KEY" | "LLM_MODEL";

View File

@@ -49,7 +49,7 @@ export interface IInstanceConfig {
posthog_api_key: string | undefined;
posthog_host: string | undefined;
has_unsplash_configured: boolean;
has_openai_configured: boolean;
has_llm_configured: boolean;
file_size_limit: number | undefined;
is_smtp_configured: boolean;
app_base_url: string | undefined;

View File

@@ -4,7 +4,7 @@ export interface IDropdown {
// root props
onOpen?: () => void;
onClose?: () => void;
containerClassName?: (isOpen: boolean) => string;
containerClassName?: string | ((isOpen: boolean) => string);
tabIndex?: number;
placement?: Placement;
disabled?: boolean;

View File

@@ -1,19 +1,14 @@
import React, { FC, useMemo, useRef, useState } from "react";
import sortBy from "lodash/sortBy";
// headless ui
import { Combobox } from "@headlessui/react";
// popper-js
import sortBy from "lodash/sortBy";
import React, { FC, useMemo, useRef, useState } from "react";
import { usePopper } from "react-popper";
// plane helpers
// plane imports
import { useOutsideClickDetector } from "@plane/hooks";
// components
// local imports
import { cn } from "../../helpers";
import { useDropdownKeyPressed } from "../hooks/use-dropdown-key-pressed";
import { DropdownButton } from "./common";
import { DropdownOptions } from "./common/options";
// hooks
import { useDropdownKeyPressed } from "../hooks/use-dropdown-key-pressed";
// helper
import { cn } from "../../helpers";
// types
import { IMultiSelectDropdown } from "./dropdown";
export const MultiSelectDropdown: FC<IMultiSelectDropdown> = (props) => {
@@ -118,7 +113,10 @@ export const MultiSelectDropdown: FC<IMultiSelectDropdown> = (props) => {
ref={dropdownRef}
value={value}
onChange={onChange}
className={cn("h-full", containerClassName)}
className={cn(
"h-full",
typeof containerClassName === "function" ? containerClassName(isOpen) : containerClassName
)}
tabIndex={tabIndex}
multiple
onKeyDown={handleKeyDown}

View File

@@ -1,19 +1,14 @@
import React, { FC, useMemo, useRef, useState } from "react";
import sortBy from "lodash/sortBy";
// headless ui
import { Combobox } from "@headlessui/react";
// popper-js
import sortBy from "lodash/sortBy";
import React, { FC, useMemo, useRef, useState } from "react";
import { usePopper } from "react-popper";
// plane helpers
// plane imports
import { useOutsideClickDetector } from "@plane/hooks";
// components
// local imports
import { cn } from "../../helpers";
import { useDropdownKeyPressed } from "../hooks/use-dropdown-key-pressed";
import { DropdownButton } from "./common";
import { DropdownOptions } from "./common/options";
// hooks
import { useDropdownKeyPressed } from "../hooks/use-dropdown-key-pressed";
// helper
import { cn } from "../../helpers";
// types
import { ISingleSelectDropdown } from "./dropdown";
export const Dropdown: FC<ISingleSelectDropdown> = (props) => {
@@ -118,7 +113,10 @@ export const Dropdown: FC<ISingleSelectDropdown> = (props) => {
ref={dropdownRef}
value={value}
onChange={onChange}
className={cn("h-full", containerClassName)}
className={cn(
"h-full",
typeof containerClassName === "function" ? containerClassName(isOpen) : containerClassName
)}
tabIndex={tabIndex}
onKeyDown={handleKeyDown}
disabled={disabled}

View File

@@ -86,36 +86,6 @@ export const copyUrlToClipboard = async (path: string) => {
await copyTextToClipboard(url.toString());
};
/**
 * @description Derives a stable HSL color from the given string; identical
 * inputs always produce the identical color.
 * @param {string} string - Source text used to seed the color
 * @returns {string} HSL color string, or the primary theme color for empty input
 * @example
 * generateRandomColor("hello") // always the same HSL value for "hello"
 * generateRandomColor("") // "rgb(var(--color-primary-100))"
 */
export const generateRandomColor = (string: string): string => {
  // Empty/falsy input falls back to the theme's primary color token.
  if (!string) return "rgb(var(--color-primary-100))";
  const text = `${string}`;
  // Seed mirrors "<length><text><text>" so equal-length strings still diverge.
  const seed = text.length.toString() + text + text;
  // Rolling string hash: (h << 5) - h === h * 31 (Java-style), coerced to int32 by <<.
  let hash = 0;
  for (const char of seed) {
    hash = (hash << 5) - hash + char.charCodeAt(0);
  }
  // hash may be negative, so the hue can be negative too — valid in CSS.
  const hue = hash % 360;
  return `hsl(${hue}, 70%, 60%)`;
};
/**
* @description Gets first character of first word or first characters of first two words
* @param {string} str - Input string
@@ -275,6 +245,33 @@ export const checkURLValidity = (url: string): boolean => {
return urlPattern.test(url);
};
/**
 * Combines array elements with a separator, inserting a conjunction before the
 * final element ("a, b, and c").
 * @param array Array of strings to combine
 * @param separator Separator placed between elements (default: ", ")
 * @param conjunction Word placed before the last element (default: "and")
 * @returns Combined string, or "" for a missing/empty array
 */
export const joinWithConjunction = (array: string[], separator: string = ", ", conjunction: string = "and"): string => {
  // Guard against undefined/null as well as the empty array.
  if (!array || array.length === 0) return "";
  switch (array.length) {
    case 1:
      return array[0];
    // Exactly two elements use the conjunction alone, with no separator.
    case 2:
      return `${array[0]} ${conjunction} ${array[1]}`;
    default: {
      const leading = array.slice(0, -1).join(separator);
      const last = array[array.length - 1];
      // Oxford-comma style: the separator is kept before the conjunction.
      return `${leading}${separator}${conjunction} ${last}`;
    }
  }
};
/**
 * @description Ensures a URL has an explicit http(s) scheme, prefixing
 * "http://" when none is present.
 * @param {string} url
 * @returns {string}
 * @example
 * ensureUrlHasProtocol("example.com") => "http://example.com"
 * ensureUrlHasProtocol("https://example.com") => "https://example.com"
 */
export const ensureUrlHasProtocol = (url: string): string =>
  // Match a real scheme ("http://" / "https://", case-insensitive per RFC 3986)
  // instead of startsWith("http"), which wrongly skipped hosts like "httpbin.org".
  /^https?:\/\//i.test(url) ? url : `http://${url}`;
// Browser-only clipboard functions
// let copyTextToClipboard: (text: string) => Promise<void>;

Some files were not shown because too many files have changed in this diff Show More