refactor: split editor shell into separate step components with shared interfaces

This commit is contained in:
Ender 2025-10-24 20:18:03 +02:00
parent bb166f9377
commit 7ca9b130c3
9 changed files with 315 additions and 152 deletions

View File

@ -1,10 +1,14 @@
import { Box, Button, Stack, Typography, TextField, MenuItem, Snackbar, Alert, Stepper, Step, StepLabel, StepButton } from '@mui/material';
import AdminLayout from '../layout/AdminLayout';
import Recorder from '../features/recorder/Recorder';
import RichEditor from './RichEditor';
import type { RichEditorHandle } from './RichEditor';
import MediaLibrary from './MediaLibrary';
import MetadataPanel, { type Metadata } from './MetadataPanel';
import { type Metadata } from './MetadataPanel';
import StepAssets from './steps/StepAssets';
import StepAiPrompt from './steps/StepAiPrompt';
import StepGenerate from './steps/StepGenerate';
import StepEdit from './steps/StepEdit';
import StepMetadata from './steps/StepMetadata';
import StepPublish from './steps/StepPublish';
import StepContainer from './steps/StepContainer';
import { useEffect, useRef, useState } from 'react';
export default function EditorShell({ onLogout, initialPostId, onBack }: { onLogout?: () => void; initialPostId?: string | null; onBack?: () => void }) {
@ -208,174 +212,69 @@ export default function EditorShell({ onLogout, initialPostId, onBack }: { onLog
</Stepper>
{activeStep === 0 && (
<Box sx={{ display: 'grid', gap: 2 }}>
<Typography variant="subtitle1">Assets (Audio & Images)</Typography>
<Stack direction={{ xs: 'column', md: 'row' }} spacing={2} alignItems="stretch">
<Box sx={{ flex: 1 }}>
<Recorder
postId={draftId ?? undefined}
onInsertAtCursor={(html: string) => editorRef.current?.insertHtmlAtCursor(html)}
initialClips={postClips}
/>
</Box>
<Box sx={{ flex: 1 }}>
<MediaLibrary
onInsert={(url) => {
if (editorRef.current) {
editorRef.current.insertHtmlAtCursor(`<img src="${url}" alt="" />`);
} else {
setDraft((prev) => `${prev || ''}<p><img src="${url}" alt="" /></p>`);
}
}}
onSetFeature={(url) => setMeta(m => ({ ...m, featureImage: url }))}
showSetFeature
/>
</Box>
</Stack>
</Box>
<StepContainer>
<StepAssets
draftId={draftId}
postClips={postClips}
onInsertAtCursor={(html: string) => editorRef.current?.insertHtmlAtCursor(html)}
onInsertImage={(url: string) => {
if (editorRef.current) {
editorRef.current.insertHtmlAtCursor(`<img src="${url}" alt="" />`);
} else {
setDraft((prev) => `${prev || ''}<p><img src="${url}" alt="" /></p>`);
}
}}
onSetFeature={(url: string) => setMeta(m => ({ ...m, featureImage: url }))}
/>
</StepContainer>
)}
{activeStep === 1 && (
<Box>
<Typography variant="subtitle1" sx={{ mb: 1 }}>AI Prompt</Typography>
<TextField
label="Instructions + context for AI generation"
value={promptText}
onChange={(e) => setPromptText(e.target.value)}
fullWidth
multiline
minRows={6}
placeholder="Describe the goal, audience, tone, outline, and reference transcript/image context to guide AI content generation."
/>
</Box>
<StepContainer>
<StepAiPrompt promptText={promptText} onChangePrompt={setPromptText} />
</StepContainer>
)}
{activeStep === 2 && (
<Box sx={{ display: 'grid', gap: 2 }}>
<Typography variant="subtitle1">Generate</Typography>
<Typography variant="body2" sx={{ color: 'text.secondary' }}>
Select images as generation assets, review audio transcriptions, and set the prompt to guide AI.
</Typography>
{/* Audio transcriptions in order */}
<Box>
<Typography variant="subtitle2" sx={{ mb: 1 }}>Audio Transcriptions</Typography>
<Stack spacing={1}>
{[...postClips].sort((a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime()).map((clip, idx) => (
<Box key={clip.id} sx={{ p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1, bgcolor: 'background.paper' }}>
<Typography variant="caption" sx={{ color: 'text.secondary' }}>#{idx + 1} · {new Date(clip.createdAt).toLocaleString()}</Typography>
<Typography variant="body2" sx={{ mt: 0.5 }}>{clip.transcript || '(no transcript yet)'}</Typography>
</Box>
))}
{postClips.length === 0 && (
<Typography variant="body2" sx={{ color: 'text.secondary' }}>(No audio clips)</Typography>
)}
</Stack>
</Box>
{/* Images selected for generation */}
<Box>
<Typography variant="subtitle2" sx={{ mb: 1 }}>Selected Images</Typography>
<Box sx={{ display: 'grid', gridTemplateColumns: 'repeat(auto-fill, minmax(120px, 1fr))', gap: 1 }}>
{genImageKeys.map((k) => (
<Box key={k} sx={{ p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1, textAlign: 'center', bgcolor: '#fafafa' }}>
<img src={`/api/media/obj?key=${encodeURIComponent(k)}`} alt={k.split('/').slice(-1)[0]} style={{ maxWidth: '100%', maxHeight: 100, objectFit: 'contain' }} />
<Button size="small" color="error" variant="text" onClick={() => toggleGenImage(k)} sx={{ mt: 0.5 }}>Remove</Button>
</Box>
))}
{genImageKeys.length === 0 && (
<Typography variant="body2" sx={{ color: 'text.secondary' }}>(No images selected)</Typography>
)}
</Box>
</Box>
{/* Media library for selecting images */}
<MediaLibrary
selectionMode
selectedKeys={genImageKeys}
onToggleSelect={toggleGenImage}
<StepContainer>
<StepGenerate
postClips={postClips}
genImageKeys={genImageKeys}
onToggleGenImage={toggleGenImage}
promptText={promptText}
onChangePrompt={setPromptText}
/>
{/* AI prompt used for generation */}
<Box>
<Typography variant="subtitle2" sx={{ mb: 1 }}>AI Prompt</Typography>
<TextField
label="Instructions + context for AI generation"
value={promptText}
onChange={(e) => setPromptText(e.target.value)}
fullWidth
multiline
minRows={4}
/>
</Box>
<Stack direction="row" spacing={1}>
<Button variant="contained" disabled>Generate Draft (Coming Soon)</Button>
<Button variant="outlined" onClick={() => setActiveStep(3)}>Skip to Edit</Button>
</Stack>
</Box>
</StepContainer>
)}
{activeStep === 3 && (
<Box>
<Typography variant="subtitle1" sx={{ mb: 1 }}>Edit Content</Typography>
<Box sx={{
overflowX: 'auto',
'& img': { maxWidth: '100%', height: 'auto' },
'& figure img': { display: 'block', margin: '0 auto' },
'& video, & iframe': { maxWidth: '100%' },
}}>
<RichEditor ref={editorRef as any} value={draft} onChange={(html) => setDraft(html)} placeholder="Write your post..." />
</Box>
{draftId && (
<Typography variant="caption" sx={{ mt: 1, display: 'block' }}>ID: {draftId}</Typography>
)}
</Box>
<StepContainer>
<StepEdit editorRef={editorRef as any} draftHtml={draft} onChangeDraft={setDraft} draftId={draftId} />
</StepContainer>
)}
{activeStep === 4 && (
<MetadataPanel
value={meta}
onChange={setMeta}
/>
<StepContainer>
<StepMetadata value={meta} onChange={setMeta} />
</StepContainer>
)}
{activeStep === 5 && (
<Box sx={{ display: 'grid', gap: 1 }}>
<Typography variant="subtitle1">Publish</Typography>
<Typography variant="body2" sx={{ color: 'text.secondary' }}>
Preview reflects Ghost media URL rewriting. Layout may differ from your Ghost theme.
</Typography>
<Stack direction="row" spacing={1}>
<Button size="small" variant="outlined" onClick={refreshPreview} disabled={previewLoading}>Refresh Preview</Button>
<Button size="small" variant="text" onClick={saveDraft}>Save Post</Button>
</Stack>
{previewLoading && (
<Box sx={{ p: 2, border: '1px dashed', borderColor: 'divider', borderRadius: 1 }}>Generating preview</Box>
)}
{previewError && (
<Alert severity="error">{previewError}</Alert>
)}
{!previewLoading && !previewError && (
<Box
sx={{
p: 1.5,
border: '1px solid #eee',
borderRadius: 1,
bgcolor: '#fff',
overflowX: 'auto',
'& img': { maxWidth: '100%', height: 'auto' },
'& figure img': { display: 'block', margin: '0 auto' },
'& video, & iframe': { maxWidth: '100%' },
}}
dangerouslySetInnerHTML={{ __html: previewHtml || draft }}
/>
)}
<Stack direction="row" spacing={1} sx={{ mt: 1 }}>
<Button variant="outlined" onClick={() => ghostPublish('draft')}>Save Draft to Ghost</Button>
<Button variant="contained" onClick={() => ghostPublish('published')}>Publish to Ghost</Button>
</Stack>
</Box>
<StepContainer sx={{ gap: 1 }}>
<StepPublish
previewLoading={previewLoading}
previewError={previewError}
previewHtml={previewHtml}
draftHtml={draft}
onRefreshPreview={refreshPreview}
onSaveDraft={saveDraft}
onGhostPublish={ghostPublish}
/>
</StepContainer>
)}
{/* Sticky bottom nav so Back/Next don't move */}

View File

@ -0,0 +1,26 @@
import { Box, Button, Typography } from '@mui/material';
/**
 * Grid of images currently chosen as generation assets.
 * Each tile shows a thumbnail (served via the media-object API by key) and a
 * Remove action that reports the image key back to the parent.
 */
export default function SelectedImages({
  imageKeys,
  onRemove,
}: {
  imageKeys: string[];
  onRemove: (key: string) => void;
}) {
  const hasImages = imageKeys.length > 0;
  return (
    <Box>
      <Typography variant="subtitle2" sx={{ mb: 1 }}>Selected Images</Typography>
      <Box sx={{ display: 'grid', gridTemplateColumns: 'repeat(auto-fill, minmax(120px, 1fr))', gap: 1 }}>
        {imageKeys.map((imageKey) => {
          // Last path segment doubles as a human-readable alt text.
          const fileName = imageKey.split('/').slice(-1)[0];
          return (
            <Box
              key={imageKey}
              sx={{ p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1, textAlign: 'center', bgcolor: '#fafafa' }}
            >
              <img
                src={`/api/media/obj?key=${encodeURIComponent(imageKey)}`}
                alt={fileName}
                style={{ maxWidth: '100%', maxHeight: 100, objectFit: 'contain' }}
              />
              <Button size="small" color="error" variant="text" onClick={() => onRemove(imageKey)} sx={{ mt: 0.5 }}>Remove</Button>
            </Box>
          );
        })}
        {!hasImages && (
          <Typography variant="body2" sx={{ color: 'text.secondary' }}>(No images selected)</Typography>
        )}
      </Box>
    </Box>
  );
}

View File

@ -0,0 +1,24 @@
import { Box, TextField, Typography } from '@mui/material';
/**
 * "AI Prompt" wizard step: a single controlled multiline field where the user
 * writes the instructions/context that will guide AI content generation.
 */
export default function StepAiPrompt(props: { promptText: string; onChangePrompt: (v: string) => void }) {
  const { promptText, onChangePrompt } = props;
  // Propagate every keystroke to the parent-owned prompt state.
  const handleChange = (e: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) =>
    onChangePrompt(e.target.value);
  return (
    <Box>
      <Typography variant="subtitle1" sx={{ mb: 1 }}>AI Prompt</Typography>
      <TextField
        label="Instructions + context for AI generation"
        value={promptText}
        onChange={handleChange}
        fullWidth
        multiline
        minRows={6}
        placeholder="Describe the goal, audience, tone, outline, and reference transcript/image context to guide AI content generation."
      />
    </Box>
  );
}

View File

@ -0,0 +1,41 @@
import { Box, Stack, Typography } from '@mui/material';
import Recorder from '../../features/recorder/Recorder';
import MediaLibrary from '../MediaLibrary';
// Shape of an audio clip record attached to a post; `transcript` is filled in
// asynchronously, so it may be absent.
export type Clip = { id: string; bucket: string; key: string; mime: string; transcript?: string; createdAt: string };

/**
 * "Assets" wizard step: side-by-side audio recorder and image media library.
 * All mutations (inserting HTML, choosing a feature image) are delegated to
 * the parent via callbacks so this step stays stateless.
 */
export default function StepAssets(props: {
  draftId?: string | null;
  postClips: Clip[];
  onInsertAtCursor: (html: string) => void;
  onInsertImage: (url: string) => void;
  onSetFeature: (url: string) => void;
}) {
  const { draftId, postClips, onInsertAtCursor, onInsertImage, onSetFeature } = props;
  return (
    <Box sx={{ display: 'grid', gap: 2 }}>
      <Typography variant="subtitle1">Assets (Audio & Images)</Typography>
      <Stack direction={{ xs: 'column', md: 'row' }} spacing={2} alignItems="stretch">
        {/* Left pane: audio recording, scoped to the current draft when one exists. */}
        <Box sx={{ flex: 1 }}>
          <Recorder postId={draftId ?? undefined} onInsertAtCursor={onInsertAtCursor} initialClips={postClips} />
        </Box>
        {/* Right pane: image library with insert + set-feature actions. */}
        <Box sx={{ flex: 1 }}>
          <MediaLibrary onInsert={onInsertImage} onSetFeature={onSetFeature} showSetFeature />
        </Box>
      </Stack>
    </Box>
  );
}

View File

@ -0,0 +1,10 @@
import { Box, type SxProps, type Theme } from '@mui/material';
import type { PropsWithChildren } from 'react';
/**
 * Shared scrollable viewport for every wizard step.
 * Pins the step content to a fixed 70vh-high, vertically scrolling grid so the
 * surrounding navigation (stepper above, Back/Next below) never moves.
 *
 * @param children step content to render inside the viewport
 * @param sx optional overrides merged after the defaults (so callers can, e.g.,
 *           change the grid `gap` as StepPublish does)
 */
export default function StepContainer({ children, sx }: PropsWithChildren<{ sx?: SxProps<Theme> }>) {
  // `height: { xs: '70vh', md: '70vh' }` in the original was a redundant
  // responsive object (identical at every breakpoint) — a plain value is
  // equivalent and cheaper to resolve.
  return (
    <Box sx={{ height: '70vh', maxHeight: '70vh', overflowY: 'auto', pr: 0.5, display: 'grid', gap: 2, ...sx }}>
      {children}
    </Box>
  );
}

View File

@ -0,0 +1,32 @@
import { Box, Typography } from '@mui/material';
import RichEditor, { type RichEditorHandle } from '../RichEditor';
import type { ForwardedRef } from 'react';
/**
 * "Edit Content" wizard step: hosts the rich-text editor for the draft HTML.
 *
 * @param editorRef   ref forwarded to RichEditor so the parent can call
 *                    imperative methods (e.g. insertHtmlAtCursor)
 * @param draftHtml   current draft HTML (controlled value)
 * @param onChangeDraft invoked with the new HTML on every edit
 * @param draftId     persisted post id, shown as a caption when present
 */
export default function StepEdit({
  editorRef,
  draftHtml,
  onChangeDraft,
  draftId,
}: {
  // Fixed: the original declared `ForwardedRef<RichEditorHandle> | any`, which
  // collapses the whole type to `any` and defeats type checking. Callers that
  // pass `editorRef as any` remain compatible.
  editorRef: ForwardedRef<RichEditorHandle>;
  draftHtml: string;
  onChangeDraft: (html: string) => void;
  draftId?: string | null;
}) {
  return (
    <Box>
      <Typography variant="subtitle1" sx={{ mb: 1 }}>Edit Content</Typography>
      {/* Constrain embedded media so wide content scrolls instead of overflowing. */}
      <Box sx={{
        overflowX: 'auto',
        '& img': { maxWidth: '100%', height: 'auto' },
        '& figure img': { display: 'block', margin: '0 auto' },
        '& video, & iframe': { maxWidth: '100%' },
      }}>
        <RichEditor ref={editorRef} value={draftHtml} onChange={onChangeDraft} placeholder="Write your post..." />
      </Box>
      {draftId && (
        <Typography variant="caption" sx={{ mt: 1, display: 'block' }}>ID: {draftId}</Typography>
      )}
    </Box>
  );
}

View File

@ -0,0 +1,64 @@
import { Box, Stack, TextField, Typography } from '@mui/material';
import MediaLibrary from '../MediaLibrary';
import SelectedImages from './SelectedImages';
import type { Clip } from './StepAssets';
/**
 * "Generate" wizard step: review audio transcriptions in recording order,
 * pick images as generation assets, and refine the AI prompt.
 * Selection state and the prompt live in the parent; this step only renders
 * and forwards events.
 */
export default function StepGenerate({
  postClips,
  genImageKeys,
  onToggleGenImage,
  promptText,
  onChangePrompt,
}: {
  postClips: Clip[];
  genImageKeys: string[];
  onToggleGenImage: (key: string) => void;
  promptText: string;
  onChangePrompt: (v: string) => void;
}) {
  // Oldest first, on a copy so the parent's array is never mutated.
  const orderedClips = [...postClips].sort(
    (a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime(),
  );
  return (
    <Box sx={{ display: 'grid', gap: 2 }}>
      <Typography variant="subtitle1">Generate</Typography>
      <Typography variant="body2" sx={{ color: 'text.secondary' }}>
        Select images as generation assets, review audio transcriptions, and set the prompt to guide AI.
      </Typography>
      {/* Audio transcriptions, oldest first */}
      <Box>
        <Typography variant="subtitle2" sx={{ mb: 1 }}>Audio Transcriptions</Typography>
        <Stack spacing={1}>
          {orderedClips.map((clip, position) => (
            <Box
              key={clip.id}
              sx={{ p: 1, border: '1px solid', borderColor: 'divider', borderRadius: 1, bgcolor: 'background.paper' }}
            >
              <Typography variant="caption" sx={{ color: 'text.secondary' }}>#{position + 1} · {new Date(clip.createdAt).toLocaleString()}</Typography>
              <Typography variant="body2" sx={{ mt: 0.5 }}>{clip.transcript || '(no transcript yet)'}</Typography>
            </Box>
          ))}
          {orderedClips.length === 0 && (
            <Typography variant="body2" sx={{ color: 'text.secondary' }}>(No audio clips)</Typography>
          )}
        </Stack>
      </Box>
      {/* Currently selected images; Remove toggles the key off. */}
      <SelectedImages imageKeys={genImageKeys} onRemove={onToggleGenImage} />
      {/* Library in selection mode so clicking a thumbnail toggles it. */}
      <MediaLibrary selectionMode selectedKeys={genImageKeys} onToggleSelect={onToggleGenImage} />
      {/* Prompt shared with the dedicated AI Prompt step. */}
      <Box>
        <Typography variant="subtitle2" sx={{ mb: 1 }}>AI Prompt</Typography>
        <TextField
          label="Instructions + context for AI generation"
          value={promptText}
          onChange={(e) => onChangePrompt(e.target.value)}
          fullWidth
          multiline
          minRows={4}
        />
      </Box>
    </Box>
  );
}

View File

@ -0,0 +1,10 @@
import { Box } from '@mui/material';
import MetadataPanel, { type Metadata } from '../MetadataPanel';
export default function StepMetadata({ value, onChange }: { value: Metadata; onChange: (v: Metadata) => void }) {
return (
<Box>
<MetadataPanel value={value} onChange={onChange} />
</Box>
);
}

View File

@ -0,0 +1,57 @@
import { Alert, Box, Button, Stack, Typography } from '@mui/material';
/**
 * "Publish" wizard step: renders the Ghost-rewritten HTML preview and exposes
 * refresh / save / publish actions. All state and side effects live in the
 * parent; this component is presentational.
 */
export default function StepPublish({
  previewLoading,
  previewError,
  previewHtml,
  draftHtml,
  onRefreshPreview,
  onSaveDraft,
  onGhostPublish,
}: {
  previewLoading: boolean;
  previewError: string | null;
  previewHtml: string;
  draftHtml: string;
  onRefreshPreview: () => void;
  onSaveDraft: () => void;
  onGhostPublish: (status: 'draft' | 'published') => void;
}) {
  // Show the preview pane only once loading has finished without an error.
  const showPreview = !previewLoading && !previewError;
  // Media constraints keep wide images/videos inside the preview pane.
  const previewSx = {
    p: 1.5,
    border: '1px solid #eee',
    borderRadius: 1,
    bgcolor: '#fff',
    overflowX: 'auto',
    '& img': { maxWidth: '100%', height: 'auto' },
    '& figure img': { display: 'block', margin: '0 auto' },
    '& video, & iframe': { maxWidth: '100%' },
  };
  return (
    <Box sx={{ display: 'grid', gap: 1 }}>
      <Typography variant="subtitle1">Publish</Typography>
      <Typography variant="body2" sx={{ color: 'text.secondary' }}>
        Preview reflects Ghost media URL rewriting. Layout may differ from your Ghost theme.
      </Typography>
      <Stack direction="row" spacing={1}>
        <Button size="small" variant="outlined" onClick={onRefreshPreview} disabled={previewLoading}>Refresh Preview</Button>
        <Button size="small" variant="text" onClick={onSaveDraft}>Save Post</Button>
      </Stack>
      {previewLoading && (
        <Box sx={{ p: 2, border: '1px dashed', borderColor: 'divider', borderRadius: 1 }}>Generating preview</Box>
      )}
      {previewError && <Alert severity="error">{previewError}</Alert>}
      {showPreview && (
        // Falls back to the raw draft HTML until a server preview exists.
        <Box sx={previewSx} dangerouslySetInnerHTML={{ __html: previewHtml || draftHtml }} />
      )}
      <Stack direction="row" spacing={1} sx={{ mt: 1 }}>
        <Button variant="outlined" onClick={() => onGhostPublish('draft')}>Save Draft to Ghost</Button>
        <Button variant="contained" onClick={() => onGhostPublish('published')}>Publish to Ghost</Button>
      </Stack>
    </Box>
  );
}