Mirror of https://github.com/makeplane/plane.git (synced 2025-12-25 16:19:43 +01:00)

Commit: Merge branch 'preview' of github.com:makeplane/plane into chore-page-sort-order
@@ -3,13 +3,7 @@ import random
 from rest_framework import serializers
 
 # Module imports
-from plane.db.models import (
-    Project,
-    ProjectIdentifier,
-    WorkspaceMember,
-    State,
-    Estimate,
-)
+from plane.db.models import Project, ProjectIdentifier, WorkspaceMember, State, Estimate
 
 from plane.utils.content_validator import (
     validate_html_content,
@@ -123,6 +117,7 @@ class ProjectCreateSerializer(BaseSerializer):
 
     def create(self, validated_data):
         identifier = validated_data.get("identifier", "").strip().upper()
 
         if identifier == "":
            raise serializers.ValidationError(detail="Project Identifier is required")
 
@@ -210,7 +210,9 @@ class ProjectListCreateAPIEndpoint(BaseAPIView):
         """
         try:
             workspace = Workspace.objects.get(slug=slug)
 
             serializer = ProjectCreateSerializer(data={**request.data}, context={"workspace_id": workspace.id})
 
             if serializer.is_valid():
                 serializer.save()
 
@@ -13,7 +13,7 @@ from plane.db.models import (
     ProjectIdentifier,
     DeployBoard,
     ProjectPublicMember,
-    IssueSequence
+    IssueSequence,
 )
 from plane.utils.content_validator import (
     validate_html_content,
@@ -15,9 +15,10 @@ from django.utils import timezone
 from django.db.models import Prefetch
 
 # Module imports
-from plane.db.models import ExporterHistory, Issue, IssueRelation
+from plane.db.models import ExporterHistory, Issue, IssueComment, IssueRelation, IssueSubscriber
 from plane.utils.exception_logger import log_exception
 from plane.utils.exporters import Exporter, IssueExportSchema
+from plane.utils.porters.exporter import DataExporter
+from plane.utils.porters.serializers.issue import IssueExportSerializer
 
 
 def create_zip_file(files: List[tuple[str, str | bytes]]) -> io.BytesIO:
@@ -159,10 +160,16 @@ def issue_export_task(
             "labels",
             "issue_cycle__cycle",
             "issue_module__module",
-            "issue_comments",
             "assignees",
-            "issue_subscribers",
             "issue_link",
+            Prefetch(
+                "issue_subscribers",
+                queryset=IssueSubscriber.objects.select_related("subscriber"),
+            ),
+            Prefetch(
+                "issue_comments",
+                queryset=IssueComment.objects.select_related("actor").order_by("created_at"),
+            ),
             Prefetch(
                 "issue_relation",
                 queryset=IssueRelation.objects.select_related("related_issue", "related_issue__project"),
@@ -180,11 +187,7 @@ def issue_export_task(
 
     # Create exporter for the specified format
     try:
-        exporter = Exporter(
-            format_type=provider,
-            schema_class=IssueExportSchema,
-            options={"list_joiner": ", "},
-        )
+        exporter = DataExporter(IssueExportSerializer, format_type=provider)
     except ValueError as e:
         # Invalid format type
         exporter_instance = ExporterHistory.objects.get(token=token_id)
@@ -116,6 +116,11 @@ class Project(BaseModel):
     external_source = models.CharField(max_length=255, null=True, blank=True)
     external_id = models.CharField(max_length=255, blank=True, null=True)
 
+    def __init__(self, *args, **kwargs):
+        # Track whether a timezone was provided; if so, don't override it with the workspace timezone when saving
+        self.is_timezone_provided = kwargs.get("timezone") is not None
+        super().__init__(*args, **kwargs)
+
     @property
     def cover_image_url(self):
         # Return cover image url
@@ -155,7 +160,15 @@ class Project(BaseModel):
         ordering = ("-created_at",)
 
     def save(self, *args, **kwargs):
+        from plane.db.models import Workspace
+
         self.identifier = self.identifier.strip().upper()
+        is_creating = self._state.adding
+
+        if is_creating and not self.is_timezone_provided:
+            workspace = Workspace.objects.get(id=self.workspace_id)
+            self.timezone = workspace.timezone
+
         return super().save(*args, **kwargs)
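Taken with the __init__ hook above, this makes a new project default to its workspace's timezone while preserving an explicitly passed one. A minimal sketch of the resulting behavior (illustrative only, not part of the diff; assumes an existing workspace whose timezone is "Asia/Kolkata"):

    # Hypothetical usage sketch for the new timezone inheritance.
    workspace = Workspace.objects.get(slug="acme")  # workspace.timezone == "Asia/Kolkata"

    # No timezone passed: is_timezone_provided is False, so the first save()
    # copies the workspace timezone onto the project.
    project = Project(name="Docs", identifier="docs", workspace=workspace)
    project.save()
    assert project.timezone == workspace.timezone
    assert project.identifier == "DOCS"  # identifier is normalized on save

    # Explicit timezone passed: is_timezone_provided is True, so save() keeps it.
    pinned = Project(name="Ops", identifier="OPS", workspace=workspace, timezone="UTC")
    pinned.save()
    assert pinned.timezone == "UTC"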
@@ -147,6 +147,11 @@ class User(AbstractBaseUser, PermissionsMixin):
             return self.cover_image
         return None
 
+    @property
+    def full_name(self):
+        """Return user's full name (first + last)."""
+        return f"{self.first_name} {self.last_name}".strip()
+
     def save(self, *args, **kwargs):
         self.email = self.email.lower().strip()
         self.mobile_number = self.mobile_number
apps/api/plane/utils/porters/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from .formatters import BaseFormatter, CSVFormatter, JSONFormatter, XLSXFormatter
from .exporter import DataExporter
from .serializers import IssueExportSerializer

__all__ = [
    # Formatters
    "BaseFormatter",
    "CSVFormatter",
    "JSONFormatter",
    "XLSXFormatter",
    # Exporters
    "DataExporter",
    # Export Serializers
    "IssueExportSerializer",
]
apps/api/plane/utils/porters/exporter.py (new file, 103 lines)
@@ -0,0 +1,103 @@
from typing import Dict, List, Optional, Union
from .formatters import BaseFormatter, CSVFormatter, JSONFormatter, XLSXFormatter


class DataExporter:
    """
    Export data using DRF serializers with built-in format support.

    Usage:
        # New simplified interface
        exporter = DataExporter(BookSerializer, format_type='csv')
        filename, content = exporter.export('books_export', queryset)

        # Legacy interface (still supported)
        exporter = DataExporter(BookSerializer)
        csv_string = exporter.to_string(queryset, CSVFormatter())
    """

    # Available formatters
    FORMATTERS = {
        "csv": CSVFormatter,
        "json": JSONFormatter,
        "xlsx": XLSXFormatter,
    }

    def __init__(self, serializer_class, format_type: Optional[str] = None, **serializer_kwargs):
        """
        Initialize exporter with serializer and optional format type.

        Args:
            serializer_class: DRF serializer class to use for data serialization
            format_type: Optional format type (csv, json, xlsx). If provided, enables the export() method.
            **serializer_kwargs: Additional kwargs to pass to the serializer
        """
        self.serializer_class = serializer_class
        self.serializer_kwargs = serializer_kwargs
        self.format_type = format_type
        self.formatter = None

        if format_type:
            if format_type not in self.FORMATTERS:
                raise ValueError(f"Unsupported format: {format_type}. Available: {list(self.FORMATTERS.keys())}")
            # Create formatter with default options
            self.formatter = self._create_formatter(format_type)

    def _create_formatter(self, format_type: str) -> BaseFormatter:
        """Create formatter instance with appropriate options."""
        formatter_class = self.FORMATTERS[format_type]

        # Apply format-specific options
        if format_type == "xlsx":
            return formatter_class(list_joiner=", ")
        return formatter_class()

    def serialize(self, queryset) -> List[Dict]:
        """QuerySet → list of dicts"""
        serializer = self.serializer_class(queryset, many=True, **self.serializer_kwargs)
        return serializer.data

    def export(self, filename: str, queryset) -> tuple[str, Union[str, bytes]]:
        """
        Export queryset to file with the configured format.

        Args:
            filename: Base filename (without extension)
            queryset: Django QuerySet to export

        Returns:
            Tuple of (filename_with_extension, content)

        Raises:
            ValueError: If format_type was not provided during initialization
        """
        if not self.formatter:
            raise ValueError("format_type must be provided during initialization to use the export() method")

        data = self.serialize(queryset)
        content = self.formatter.encode(data)
        full_filename = f"{filename}.{self.formatter.extension}"

        return full_filename, content

    def to_string(self, queryset, formatter: BaseFormatter) -> Union[str, bytes]:
        """Export to a formatted string (legacy interface)"""
        data = self.serialize(queryset)
        return formatter.encode(data)

    def to_file(self, queryset, filepath: str, formatter: BaseFormatter) -> str:
        """Export to a file (legacy interface)"""
        content = self.to_string(queryset, formatter)
        # XLSX content is bytes; write in binary mode so it isn't corrupted
        if isinstance(content, bytes):
            with open(filepath, "wb") as f:
                f.write(content)
        else:
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(content)
        return filepath

    @classmethod
    def get_available_formats(cls) -> List[str]:
        """Get list of available export formats."""
        return list(cls.FORMATTERS.keys())
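Exercising the docstring's two interfaces end to end looks roughly like this (a sketch; BookSerializer and Book are the docstring's illustrative stand-ins, not real Plane models):

    from plane.utils.porters import DataExporter, CSVFormatter

    # New interface: pick the format up front; export() returns the final
    # filename plus ready-to-write content (str for csv/json, bytes for xlsx).
    exporter = DataExporter(BookSerializer, format_type="xlsx")
    filename, content = exporter.export("books_export", Book.objects.all())
    # filename == "books_export.xlsx"

    # Legacy interface: supply a formatter per call instead.
    csv_text = DataExporter(BookSerializer).to_string(Book.objects.all(), CSVFormatter())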
apps/api/plane/utils/porters/formatters.py (new file, 265 lines)
@@ -0,0 +1,265 @@
"""
Import/Export System with Pluggable Formatters

Exporter: QuerySet → Serializer → Formatter → File/String
Importer: File/String → Formatter → Serializer → Models
"""

import csv
import json
from abc import ABC, abstractmethod
from io import BytesIO, StringIO
from typing import Any, Dict, List, Union

from openpyxl import Workbook, load_workbook


class BaseFormatter(ABC):
    @abstractmethod
    def encode(self, data: List[Dict]) -> Union[str, bytes]:
        """Data → formatted string/bytes"""
        pass

    @abstractmethod
    def decode(self, content: Union[str, bytes]) -> List[Dict]:
        """Formatted string/bytes → data"""
        pass

    @property
    @abstractmethod
    def extension(self) -> str:
        pass


class JSONFormatter(BaseFormatter):
    def __init__(self, indent: int = 2):
        self.indent = indent

    def encode(self, data: List[Dict]) -> str:
        return json.dumps(data, indent=self.indent, default=str)

    def decode(self, content: str) -> List[Dict]:
        return json.loads(content)

    @property
    def extension(self) -> str:
        return "json"


class CSVFormatter(BaseFormatter):
    def __init__(self, flatten: bool = True, delimiter: str = ",", prettify_headers: bool = True):
        """
        Args:
            flatten: Whether to flatten nested dicts.
            delimiter: CSV delimiter character.
            prettify_headers: If True, transforms 'created_by_name' → 'Created By Name'.
        """
        self.flatten = flatten
        self.delimiter = delimiter
        self.prettify_headers = prettify_headers

    def _prettify_header(self, header: str) -> str:
        """Transform 'created_by_name' → 'Created By Name'"""
        return header.replace("_", " ").title()

    def _normalize_header(self, header: str) -> str:
        """Transform 'Display Name' → 'display_name' (reverse of prettify)"""
        return header.strip().lower().replace(" ", "_")

    def _flatten(self, row: Dict, parent_key: str = "") -> Dict:
        items = {}
        for key, value in row.items():
            new_key = f"{parent_key}__{key}" if parent_key else key
            if isinstance(value, dict):
                items.update(self._flatten(value, new_key))
            elif isinstance(value, list):
                items[new_key] = json.dumps(value)
            else:
                items[new_key] = value
        return items

    def _unflatten(self, row: Dict) -> Dict:
        result = {}
        for key, value in row.items():
            parts = key.split("__")
            current = result
            for part in parts[:-1]:
                current = current.setdefault(part, {})

            if isinstance(value, str):
                try:
                    parsed = json.loads(value)
                    if isinstance(parsed, (list, dict)):
                        value = parsed
                except (json.JSONDecodeError, TypeError):
                    pass

            current[parts[-1]] = value
        return result

    def encode(self, data: List[Dict]) -> str:
        if not data:
            return ""

        if self.flatten:
            data = [self._flatten(row) for row in data]

        # Collect all unique field names in order
        fieldnames = []
        for row in data:
            for key in row.keys():
                if key not in fieldnames:
                    fieldnames.append(key)

        output = StringIO()

        if self.prettify_headers:
            # Create header mapping: original_key → Pretty Header
            header_map = {key: self._prettify_header(key) for key in fieldnames}
            pretty_headers = [header_map[key] for key in fieldnames]

            # Write pretty headers manually, then write data rows
            writer = csv.writer(output, delimiter=self.delimiter)
            writer.writerow(pretty_headers)

            # Write data rows in the same field order
            for row in data:
                writer.writerow([row.get(key, "") for key in fieldnames])
        else:
            writer = csv.DictWriter(output, fieldnames=fieldnames, delimiter=self.delimiter)
            writer.writeheader()
            writer.writerows(data)

        return output.getvalue()

    def decode(self, content: str, normalize_headers: bool = True) -> List[Dict]:
        """
        Decode CSV content to a list of dicts.

        Args:
            content: CSV string
            normalize_headers: If True, converts 'Display Name' → 'display_name'
        """
        rows = list(csv.DictReader(StringIO(content), delimiter=self.delimiter))

        # Normalize headers: 'Email' → 'email', 'Display Name' → 'display_name'
        if normalize_headers:
            rows = [{self._normalize_header(k): v for k, v in row.items()} for row in rows]

        if self.flatten:
            rows = [self._unflatten(row) for row in rows]

        return rows

    @property
    def extension(self) -> str:
        return "csv"


class XLSXFormatter(BaseFormatter):
    """Formatter for XLSX (Excel) files using openpyxl."""

    def __init__(self, prettify_headers: bool = True, list_joiner: str = ", "):
        """
        Args:
            prettify_headers: If True, transforms 'created_by_name' → 'Created By Name'.
            list_joiner: String used to join list values (default: ", ").
        """
        self.prettify_headers = prettify_headers
        self.list_joiner = list_joiner

    def _prettify_header(self, header: str) -> str:
        """Transform 'created_by_name' → 'Created By Name'"""
        return header.replace("_", " ").title()

    def _normalize_header(self, header: str) -> str:
        """Transform 'Display Name' → 'display_name' (reverse of prettify)"""
        return header.strip().lower().replace(" ", "_")

    def _format_value(self, value: Any) -> Any:
        """Format a value for an XLSX cell."""
        if value is None:
            return ""
        if isinstance(value, list):
            return self.list_joiner.join(str(v) for v in value)
        if isinstance(value, dict):
            return json.dumps(value)
        return value

    def encode(self, data: List[Dict]) -> bytes:
        """Encode data to XLSX bytes."""
        wb = Workbook()
        ws = wb.active

        if not data:
            # Return an empty workbook
            output = BytesIO()
            wb.save(output)
            output.seek(0)
            return output.getvalue()

        # Collect all unique field names in order
        fieldnames = []
        for row in data:
            for key in row.keys():
                if key not in fieldnames:
                    fieldnames.append(key)

        # Write header row
        if self.prettify_headers:
            headers = [self._prettify_header(key) for key in fieldnames]
        else:
            headers = fieldnames
        ws.append(headers)

        # Write data rows
        for row in data:
            ws.append([self._format_value(row.get(key, "")) for key in fieldnames])

        output = BytesIO()
        wb.save(output)
        output.seek(0)
        return output.getvalue()

    def decode(self, content: bytes, normalize_headers: bool = True) -> List[Dict]:
        """
        Decode XLSX bytes to a list of dicts.

        Args:
            content: XLSX file bytes
            normalize_headers: If True, converts 'Display Name' → 'display_name'
        """
        wb = load_workbook(filename=BytesIO(content), read_only=True, data_only=True)
        ws = wb.active

        rows = list(ws.iter_rows(values_only=True))
        if not rows:
            return []

        # First row is headers
        headers = list(rows[0])
        if normalize_headers:
            headers = [self._normalize_header(str(h)) if h else "" for h in headers]

        # Convert remaining rows to dicts
        result = []
        for row in rows[1:]:
            row_dict = {}
            for i, value in enumerate(row):
                if i < len(headers) and headers[i]:
                    # Try to parse JSON strings back into lists/dicts
                    if isinstance(value, str):
                        try:
                            parsed = json.loads(value)
                            if isinstance(parsed, (list, dict)):
                                value = parsed
                        except (json.JSONDecodeError, TypeError):
                            pass
                    row_dict[headers[i]] = value
            result.append(row_dict)

        return result

    @property
    def extension(self) -> str:
        return "xlsx"
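As a sanity check on the header handling, a CSV round trip shows prettify and normalize pairing up (standalone sketch, no Django involved):

    from plane.utils.porters.formatters import CSVFormatter

    fmt = CSVFormatter()
    rows = [{"display_name": "Jane Doe", "labels": ["bug", "api"]}]

    text = fmt.encode(rows)
    # Header row reads "Display Name,Labels"; the list is JSON-encoded in its cell.

    decoded = fmt.decode(text)
    # Headers are normalized back ("Display Name" -> "display_name") and the
    # JSON-encoded list is parsed back into a Python list.
    assert decoded == rows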
apps/api/plane/utils/porters/serializers/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
from .issue import IssueExportSerializer

__all__ = [
    # Export Serializers
    "IssueExportSerializer",
]
apps/api/plane/utils/porters/serializers/issue.py (new file, 141 lines)
@@ -0,0 +1,141 @@
# Third party imports
from rest_framework import serializers

# Module imports
from plane.app.serializers import IssueSerializer


class IssueExportSerializer(IssueSerializer):
    """
    Export-optimized serializer that extends IssueSerializer with human-readable fields.

    Converts UUIDs to readable values for CSV/JSON export.
    """

    identifier = serializers.SerializerMethodField()
    project_name = serializers.CharField(source='project.name', read_only=True, default="")
    project_identifier = serializers.CharField(source='project.identifier', read_only=True, default="")
    state_name = serializers.CharField(source='state.name', read_only=True, default="")
    created_by_name = serializers.CharField(source='created_by.full_name', read_only=True, default="")

    assignees = serializers.SerializerMethodField()
    parent = serializers.SerializerMethodField()
    labels = serializers.SerializerMethodField()
    cycles = serializers.SerializerMethodField()
    modules = serializers.SerializerMethodField()
    comments = serializers.SerializerMethodField()
    estimate = serializers.SerializerMethodField()
    links = serializers.SerializerMethodField()
    relations = serializers.SerializerMethodField()
    subscribers = serializers.SerializerMethodField()

    class Meta(IssueSerializer.Meta):
        fields = [
            "project_name",
            "project_identifier",
            "parent",
            "identifier",
            "sequence_id",
            "name",
            "state_name",
            "priority",
            "assignees",
            "subscribers",
            "created_by_name",
            "start_date",
            "target_date",
            "completed_at",
            "created_at",
            "updated_at",
            "archived_at",
            "estimate",
            "labels",
            "cycles",
            "modules",
            "links",
            "relations",
            "comments",
            "sub_issues_count",
            "link_count",
            "attachment_count",
            "is_draft",
        ]

    def get_identifier(self, obj):
        return f"{obj.project.identifier}-{obj.sequence_id}"

    def get_assignees(self, obj):
        return [u.full_name for u in obj.assignees.all() if u.is_active]

    def get_subscribers(self, obj):
        """Return list of subscriber names."""
        return [sub.subscriber.full_name for sub in obj.issue_subscribers.all() if sub.subscriber]

    def get_parent(self, obj):
        if not obj.parent:
            return ""
        return f"{obj.parent.project.identifier}-{obj.parent.sequence_id}"

    def get_labels(self, obj):
        return [
            il.label.name
            for il in obj.label_issue.all()
            if il.deleted_at is None
        ]

    def get_cycles(self, obj):
        return [ic.cycle.name for ic in obj.issue_cycle.all()]

    def get_modules(self, obj):
        return [im.module.name for im in obj.issue_module.all()]

    def get_estimate(self, obj):
        """Return estimate point value."""
        if obj.estimate_point:
            return obj.estimate_point.value if hasattr(obj.estimate_point, 'value') else str(obj.estimate_point)
        return ""

    def get_links(self, obj):
        """Return list of issue links with titles."""
        return [
            {
                "url": link.url,
                "title": link.title if link.title else link.url,
            }
            for link in obj.issue_link.all()
        ]

    def get_relations(self, obj):
        """Return list of related issues."""
        relations = []

        # Outgoing relations (this issue relates to others)
        for rel in obj.issue_relation.all():
            if rel.related_issue:
                relations.append({
                    "type": rel.relation_type if hasattr(rel, 'relation_type') else "related",
                    "issue": f"{rel.related_issue.project.identifier}-{rel.related_issue.sequence_id}",
                    "direction": "outgoing"
                })

        # Incoming relations (other issues relate to this one)
        for rel in obj.issue_related.all():
            if rel.issue:
                relations.append({
                    "type": rel.relation_type if hasattr(rel, 'relation_type') else "related",
                    "issue": f"{rel.issue.project.identifier}-{rel.issue.sequence_id}",
                    "direction": "incoming"
                })

        return relations

    def get_comments(self, obj):
        """Return list of comments with author and timestamp."""
        return [
            {
                "comment": comment.comment_stripped if hasattr(comment, 'comment_stripped') else comment.comment_html,
                "created_by": comment.actor.full_name if comment.actor else "",
                "created_at": comment.created_at.strftime("%Y-%m-%d %H:%M:%S") if comment.created_at else "",
            }
            for comment in obj.issue_comments.all()
        ]
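In the export task this serializer is handed straight to DataExporter, as the earlier hunk shows. In isolation the flow reduces to roughly this (a trimmed-down sketch; the select_related fields are illustrative, the task's real queryset prefetches far more):

    from plane.db.models import Issue
    from plane.utils.porters import DataExporter, IssueExportSerializer

    # Mirrors the task code, where provider is one of "csv", "json", "xlsx".
    exporter = DataExporter(IssueExportSerializer, format_type="csv")
    filename, content = exporter.export(
        "issues_export", Issue.objects.select_related("project", "state")
    )
    # filename == "issues_export.csv"; content is CSV text with prettified
    # headers ("Project Name", "State Name", ...) and UUIDs resolved to names.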