Node flags (cmd/node/main.go):
--max-cpu / --max-ram-mb — Go runtime caps (GOMAXPROCS / GOMEMLIMIT)
--feed-disk-limit-mb — hard 507 refusal for new post bodies over quota
--chain-disk-limit-mb — advisory watcher (can't reject blocks without
breaking consensus; logs WARN every minute)
Client — Saved Messages (self-chat):
- Auto-created on sign-in, pinned top of chat list, blue bookmark avatar
- Send short-circuits the relay (no encrypt, no fee, no mailbox hop)
- Empty state rendered outside inverted FlatList — fixes the mirrored
"say hi…" on Android RTL-aware layout builds
- PostCard shows "You" for own posts instead of the self-contact alias
Client — user walls:
- New route /(app)/feed/author/[pub] with infinite-scroll via
`created_at` cursor and pull-to-refresh
- Profile screen gains "View posts" button (universal) next to
"Open chat" (contact-only)
Feed pipeline:
- Bump client JPEG quality 0.5 → 0.75 to match server scrubber (Q=75),
so a 60 KiB compose doesn't balloon past 256 KiB after server re-encode
- ErrPostTooLarge now wraps with the actual size vs cap, errors.Is
preserved in the HTTP layer
- FeedMailbox quota + DiskUsage surface — supports new CLI flag
README:
- Step-by-step "first node / joiner" section on the landing page,
full flag tables incl. the new resource-cap group, minimal
checklists for open/private/low-end deployments
776 lines
26 KiB
Go
package node
|
||
|
||
// Feed HTTP endpoints (v2.0.0).
|
||
//
|
||
// Mount points:
|
||
//
|
||
// POST /feed/publish — store a post body (authenticated)
|
||
// GET /feed/post/{id} — fetch a post body
|
||
// GET /feed/post/{id}/stats — {views, likes, liked_by_me?} aggregate
|
||
// POST /feed/post/{id}/view — increment off-chain view counter
|
||
// GET /feed/author/{pub} — ?limit=N, posts by an author
|
||
// GET /feed/timeline — ?follower=<pub>&limit=N, merged feed of follows
|
||
// GET /feed/trending — ?window=h&limit=N, top by likes + views
|
||
// GET /feed/foryou — ?pub=<pub>&limit=N, recommendations
|
||
// GET /feed/hashtag/{tag} — posts matching a hashtag
|
||
//
|
||
// Publish flow:
|
||
// 1. Client POSTs {content, attachment, post_id, author, sig, ts}.
|
||
// 2. Node verifies sig (Ed25519 over canonical bytes), hashes body,
|
||
// stores in FeedMailbox, returns hosting_relay + content_hash + size.
|
||
// 3. Client then submits on-chain CREATE_POST tx with that metadata.
|
||
// Node charges the fee (base + size×byte_fee) and credits the relay.
|
||
// 4. Subsequent GET /feed/post/{id} serves the stored body to anyone.
|
||
//
|
||
// Why the split? On-chain metadata gives us provable authorship + the
|
||
// pay-for-storage incentive; off-chain body storage keeps the block
|
||
// history small. If the hosting relay dies, the on-chain record stays
|
||
// (with a "body unavailable" fallback on the reader side) — authors can
|
||
// re-publish to another relay.
|
||
|
||
import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"time"

	"go-blockchain/blockchain"
	"go-blockchain/identity"
	"go-blockchain/media"
	"go-blockchain/relay"
)
|
||
|
||
// FeedConfig wires feed HTTP endpoints to the relay mailbox and the
// chain for read-after-write queries.
//
// Only Mailbox is strictly required — RegisterFeedRoutes is a no-op when
// it is nil. The chain-lookup callbacks are nil-safe: endpoints that need
// them degrade gracefully (or return 503) when they are absent.
type FeedConfig struct {
	// Mailbox is the off-chain store for post bodies served by this node.
	Mailbox *relay.FeedMailbox

	// HostingRelayPub is this node's Ed25519 pubkey — returned from
	// /feed/publish so the client knows who to put in CREATE_POST tx.
	HostingRelayPub string

	// Scrubber strips metadata from image/video/audio attachments before
	// they are stored. MUST be non-nil; a zero Scrubber (NewScrubber with
	// empty sidecar URL) still handles images in-process — only video/audio
	// require sidecar config.
	Scrubber *media.Scrubber

	// AllowUnscrubbedVideo controls server behaviour when a video upload
	// arrives and no sidecar is configured. false (default) → reject; true
	// → store as-is with a warning log. Set via --allow-unscrubbed-video
	// flag on the node. Leave false in production.
	AllowUnscrubbedVideo bool

	// Chain lookups (nil-safe; endpoints degrade gracefully).
	GetPost       func(postID string) (*blockchain.PostRecord, error)
	LikeCount     func(postID string) (uint64, error)
	HasLiked      func(postID, likerPub string) (bool, error)
	PostsByAuthor func(authorPub string, beforeTs int64, limit int) ([]*blockchain.PostRecord, error)
	Following     func(followerPub string) ([]string, error)
}
|
||
|
||
// RegisterFeedRoutes wires feed endpoints onto mux. Writes are rate-limited
|
||
// via withSubmitTxGuards; reads via withReadLimit (same limiters as /relay).
|
||
func RegisterFeedRoutes(mux *http.ServeMux, cfg FeedConfig) {
|
||
if cfg.Mailbox == nil {
|
||
return
|
||
}
|
||
mux.HandleFunc("/feed/publish", withSubmitTxGuards(feedPublish(cfg)))
|
||
mux.HandleFunc("/feed/post/", withReadLimit(feedPostRouter(cfg)))
|
||
mux.HandleFunc("/feed/author/", withReadLimit(feedAuthor(cfg)))
|
||
mux.HandleFunc("/feed/timeline", withReadLimit(feedTimeline(cfg)))
|
||
mux.HandleFunc("/feed/trending", withReadLimit(feedTrending(cfg)))
|
||
mux.HandleFunc("/feed/foryou", withReadLimit(feedForYou(cfg)))
|
||
mux.HandleFunc("/feed/hashtag/", withReadLimit(feedHashtag(cfg)))
|
||
}
|
||
|
||
// ── POST /feed/publish ────────────────────────────────────────────────────
|
||
|
||
// feedPublishRequest — what the client sends. Signature is Ed25519 over
// canonical bytes: "publish:<post_id>:<content_sha256_hex>:<ts>".
// ts must be within ±5 minutes of server clock.
//
// A request must carry at least one of Content / AttachmentB64; ReplyTo
// and QuoteOf are mutually exclusive (enforced in feedPublish).
type feedPublishRequest struct {
	PostID         string `json:"post_id"`                  // client-chosen post identifier
	Author         string `json:"author"`                   // hex Ed25519
	Content        string `json:"content"`                  // text body; may be empty when an attachment is present
	ContentType    string `json:"content_type,omitempty"`
	AttachmentB64  string `json:"attachment_b64,omitempty"` // std or raw-URL base64 accepted
	AttachmentMIME string `json:"attachment_mime,omitempty"`
	ReplyTo        string `json:"reply_to,omitempty"`       // parent post ID when this is a reply
	QuoteOf        string `json:"quote_of,omitempty"`       // quoted post ID when this is a quote
	Sig            string `json:"sig"`                      // base64 Ed25519 sig
	Ts             int64  `json:"ts"`                       // unix seconds, signed by the client
}
|
||
|
||
// feedPublishResponse reports what the client should place into its
// on-chain CREATE_POST transaction after a successful /feed/publish.
type feedPublishResponse struct {
	PostID         string   `json:"post_id"`
	HostingRelay   string   `json:"hosting_relay"`    // this node's pubkey (FeedConfig.HostingRelayPub)
	ContentHash    string   `json:"content_hash"`     // hex sha256
	Size           uint64   `json:"size"`             // billed size: content + scrubbed attachment + 128
	Hashtags       []string `json:"hashtags"`         // tags extracted by the mailbox on Store
	EstimatedFeeUT uint64   `json:"estimated_fee_ut"` // base + size*byte_fee
}
|
||
|
||
func feedPublish(cfg FeedConfig) http.HandlerFunc {
|
||
const publishSkewSecs = 300
|
||
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodPost {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
var req feedPublishRequest
|
||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||
jsonErr(w, fmt.Errorf("invalid JSON: %w", err), 400)
|
||
return
|
||
}
|
||
if req.PostID == "" || req.Author == "" || req.Sig == "" || req.Ts == 0 {
|
||
jsonErr(w, fmt.Errorf("post_id, author, sig, ts are required"), 400)
|
||
return
|
||
}
|
||
if req.Content == "" && req.AttachmentB64 == "" {
|
||
jsonErr(w, fmt.Errorf("post must have content or attachment"), 400)
|
||
return
|
||
}
|
||
now := time.Now().Unix()
|
||
if req.Ts < now-publishSkewSecs || req.Ts > now+publishSkewSecs {
|
||
jsonErr(w, fmt.Errorf("ts out of range (±%ds)", publishSkewSecs), 400)
|
||
return
|
||
}
|
||
if req.ReplyTo != "" && req.QuoteOf != "" {
|
||
jsonErr(w, fmt.Errorf("reply_to and quote_of are mutually exclusive"), 400)
|
||
return
|
||
}
|
||
|
||
// Decode attachment (raw upload — before scrub).
|
||
var rawAttachment []byte
|
||
var attachmentMIME string
|
||
if req.AttachmentB64 != "" {
|
||
b, err := base64.StdEncoding.DecodeString(req.AttachmentB64)
|
||
if err != nil {
|
||
if b, err = base64.RawURLEncoding.DecodeString(req.AttachmentB64); err != nil {
|
||
jsonErr(w, fmt.Errorf("attachment_b64: invalid base64"), 400)
|
||
return
|
||
}
|
||
}
|
||
rawAttachment = b
|
||
attachmentMIME = req.AttachmentMIME
|
||
}
|
||
|
||
// ── Step 1: verify signature over the RAW-upload hash ──────────
|
||
// The client signs what it sent. The server recomputes hash over
|
||
// the as-received bytes and verifies — this proves the upload
|
||
// came from the claimed author and wasn't tampered with in transit.
|
||
rawHasher := sha256.New()
|
||
rawHasher.Write([]byte(req.Content))
|
||
rawHasher.Write(rawAttachment)
|
||
rawContentHash := rawHasher.Sum(nil)
|
||
rawContentHashHex := hex.EncodeToString(rawContentHash)
|
||
|
||
msg := []byte(fmt.Sprintf("publish:%s:%s:%d", req.PostID, rawContentHashHex, req.Ts))
|
||
sigBytes, err := base64.StdEncoding.DecodeString(req.Sig)
|
||
if err != nil {
|
||
if sigBytes, err = base64.RawURLEncoding.DecodeString(req.Sig); err != nil {
|
||
jsonErr(w, fmt.Errorf("sig: invalid base64"), 400)
|
||
return
|
||
}
|
||
}
|
||
if _, err := hex.DecodeString(req.Author); err != nil {
|
||
jsonErr(w, fmt.Errorf("author: invalid hex"), 400)
|
||
return
|
||
}
|
||
ok, err := identity.Verify(req.Author, msg, sigBytes)
|
||
if err != nil || !ok {
|
||
jsonErr(w, fmt.Errorf("signature invalid"), 403)
|
||
return
|
||
}
|
||
|
||
// ── Step 2: MANDATORY server-side metadata scrub ─────────────
|
||
// Runs AFTER signature verification so a fake client can't burn
|
||
// CPU by triggering expensive scrub work on unauthenticated inputs.
|
||
//
|
||
// Images: in-process stdlib re-encode → kills EXIF/GPS/ICC/XMP by
|
||
// construction. Videos/audio: forwarded to FFmpeg sidecar; without
|
||
// one, we reject unless operator opted in to unscrubbed video.
|
||
attachment := rawAttachment
|
||
if len(attachment) > 0 {
|
||
if cfg.Scrubber == nil {
|
||
jsonErr(w, fmt.Errorf("media scrubber not configured on this node"), 503)
|
||
return
|
||
}
|
||
ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second)
|
||
cleaned, newMIME, err := cfg.Scrubber.Scrub(ctx, attachment, attachmentMIME)
|
||
cancel()
|
||
if err != nil {
|
||
if err == media.ErrSidecarUnavailable && cfg.AllowUnscrubbedVideo {
|
||
log.Printf("[feed] WARNING: storing unscrubbed video — no sidecar configured (author=%s)", req.Author)
|
||
} else {
|
||
status := 400
|
||
if err == media.ErrSidecarUnavailable {
|
||
status = 503
|
||
}
|
||
jsonErr(w, fmt.Errorf("scrub attachment: %w", err), status)
|
||
return
|
||
}
|
||
} else {
|
||
attachment = cleaned
|
||
attachmentMIME = newMIME
|
||
}
|
||
}
|
||
|
||
// ── Step 3: recompute content hash over the SCRUBBED bytes ────
|
||
// This is what goes into the response + on-chain CREATE_POST, so
|
||
// anyone fetching the body can verify integrity against the chain.
|
||
// The signature check already used the raw-upload hash above;
|
||
// this final hash binds the on-chain record to what readers will
|
||
// actually download.
|
||
finalHasher := sha256.New()
|
||
finalHasher.Write([]byte(req.Content))
|
||
finalHasher.Write(attachment)
|
||
contentHash := finalHasher.Sum(nil)
|
||
contentHashHex := hex.EncodeToString(contentHash)
|
||
|
||
post := &relay.FeedPost{
|
||
PostID: req.PostID,
|
||
Author: req.Author,
|
||
Content: req.Content,
|
||
ContentType: req.ContentType,
|
||
Attachment: attachment,
|
||
AttachmentMIME: attachmentMIME,
|
||
ReplyTo: req.ReplyTo,
|
||
QuoteOf: req.QuoteOf,
|
||
}
|
||
hashtags, err := cfg.Mailbox.Store(post, req.Ts)
|
||
if err != nil {
|
||
if errors.Is(err, relay.ErrPostTooLarge) {
|
||
jsonErr(w, err, 413)
|
||
return
|
||
}
|
||
if errors.Is(err, relay.ErrFeedQuotaExceeded) {
|
||
// 507 Insufficient Storage — the client should try
|
||
// another relay (or wait for TTL-driven eviction here).
|
||
jsonErr(w, err, 507)
|
||
return
|
||
}
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
|
||
// Report what the client should put into CREATE_POST.
|
||
size := uint64(len(req.Content)) + uint64(len(attachment)) + 128
|
||
fee := blockchain.BasePostFee + size*blockchain.PostByteFee
|
||
jsonOK(w, feedPublishResponse{
|
||
PostID: req.PostID,
|
||
HostingRelay: cfg.HostingRelayPub,
|
||
ContentHash: contentHashHex,
|
||
Size: size,
|
||
Hashtags: hashtags,
|
||
EstimatedFeeUT: fee,
|
||
})
|
||
}
|
||
}
|
||
|
||
// ── GET /feed/post/{id} [+ /stats subroute, POST /view] ─────────────────
|
||
|
||
// feedPostRouter dispatches /feed/post/{id}, /feed/post/{id}/stats,
|
||
// /feed/post/{id}/view to the right handler.
|
||
func feedPostRouter(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
rest := strings.TrimPrefix(r.URL.Path, "/feed/post/")
|
||
rest = strings.Trim(rest, "/")
|
||
if rest == "" {
|
||
jsonErr(w, fmt.Errorf("post id required"), 400)
|
||
return
|
||
}
|
||
parts := strings.Split(rest, "/")
|
||
postID := parts[0]
|
||
if len(parts) == 1 {
|
||
feedGetPost(cfg)(w, r, postID)
|
||
return
|
||
}
|
||
switch parts[1] {
|
||
case "stats":
|
||
feedPostStats(cfg)(w, r, postID)
|
||
case "view":
|
||
feedPostView(cfg)(w, r, postID)
|
||
case "attachment":
|
||
feedPostAttachment(cfg)(w, r, postID)
|
||
default:
|
||
jsonErr(w, fmt.Errorf("unknown sub-route %q", parts[1]), 404)
|
||
}
|
||
}
|
||
}
|
||
|
||
// feedPostAttachment handles GET /feed/post/{id}/attachment — returns the
|
||
// raw attachment bytes with the correct Content-Type so clients can use
|
||
// the URL directly as an <Image source={uri: ...}>.
|
||
//
|
||
// Why a dedicated endpoint? The /feed/post/{id} response wraps the body
|
||
// as base64 inside JSON; fetching that + decoding for N posts in a feed
|
||
// list would blow up memory. Native image loaders stream bytes straight
|
||
// to the GPU — this route lets them do that without intermediate JSON.
|
||
//
|
||
// Respects on-chain soft-delete: returns 410 when the post is tombstoned.
|
||
func feedPostAttachment(cfg FeedConfig) postHandler {
|
||
return func(w http.ResponseWriter, r *http.Request, postID string) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
if cfg.GetPost != nil {
|
||
if rec, _ := cfg.GetPost(postID); rec != nil && rec.Deleted {
|
||
jsonErr(w, fmt.Errorf("post %s deleted", postID), 410)
|
||
return
|
||
}
|
||
}
|
||
post, err := cfg.Mailbox.Get(postID)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
if post == nil || len(post.Attachment) == 0 {
|
||
jsonErr(w, fmt.Errorf("no attachment for post %s", postID), 404)
|
||
return
|
||
}
|
||
mime := post.AttachmentMIME
|
||
if mime == "" {
|
||
mime = "application/octet-stream"
|
||
}
|
||
w.Header().Set("Content-Type", mime)
|
||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(post.Attachment)))
|
||
// Cache for 1 hour — attachments are immutable (tied to content_hash),
|
||
// so aggressive client-side caching is safe and saves bandwidth.
|
||
w.Header().Set("Cache-Control", "public, max-age=3600, immutable")
|
||
w.Header().Set("ETag", `"`+postID+`"`)
|
||
w.WriteHeader(http.StatusOK)
|
||
_, _ = w.Write(post.Attachment)
|
||
}
|
||
}
|
||
|
||
// postHandler is an http.HandlerFunc variant that receives the post ID
// already parsed out of the /feed/post/{id}[...] URL path.
type postHandler func(w http.ResponseWriter, r *http.Request, postID string)
|
||
|
||
func feedGetPost(cfg FeedConfig) postHandler {
|
||
return func(w http.ResponseWriter, r *http.Request, postID string) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
post, err := cfg.Mailbox.Get(postID)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
if post == nil {
|
||
jsonErr(w, fmt.Errorf("post %s not found", postID), 404)
|
||
return
|
||
}
|
||
// Respect on-chain soft-delete.
|
||
if cfg.GetPost != nil {
|
||
if rec, _ := cfg.GetPost(postID); rec != nil && rec.Deleted {
|
||
jsonErr(w, fmt.Errorf("post %s deleted", postID), 410)
|
||
return
|
||
}
|
||
}
|
||
jsonOK(w, post)
|
||
}
|
||
}
|
||
|
||
// postStatsResponse aggregates the mailbox's off-chain view counter and
// the chain's like counter for a single post.
type postStatsResponse struct {
	PostID    string `json:"post_id"`
	Views     uint64 `json:"views"`                 // off-chain counter from the mailbox
	Likes     uint64 `json:"likes"`                 // 0 when chain lookups are not wired
	LikedByMe *bool  `json:"liked_by_me,omitempty"` // set only when ?me=<pub> given
}
|
||
|
||
func feedPostStats(cfg FeedConfig) postHandler {
|
||
return func(w http.ResponseWriter, r *http.Request, postID string) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
views, _ := cfg.Mailbox.ViewCount(postID)
|
||
var likes uint64
|
||
if cfg.LikeCount != nil {
|
||
likes, _ = cfg.LikeCount(postID)
|
||
}
|
||
resp := postStatsResponse{
|
||
PostID: postID,
|
||
Views: views,
|
||
Likes: likes,
|
||
}
|
||
if me := r.URL.Query().Get("me"); me != "" && cfg.HasLiked != nil {
|
||
if liked, err := cfg.HasLiked(postID, me); err == nil {
|
||
resp.LikedByMe = &liked
|
||
}
|
||
}
|
||
jsonOK(w, resp)
|
||
}
|
||
}
|
||
|
||
func feedPostView(cfg FeedConfig) postHandler {
|
||
return func(w http.ResponseWriter, r *http.Request, postID string) {
|
||
if r.Method != http.MethodPost {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
next, err := cfg.Mailbox.IncrementView(postID)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
jsonOK(w, map[string]any{
|
||
"post_id": postID,
|
||
"views": next,
|
||
})
|
||
}
|
||
}
|
||
|
||
// ── GET /feed/author/{pub} ────────────────────────────────────────────────
|
||
|
||
func feedAuthor(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
pub := strings.TrimPrefix(r.URL.Path, "/feed/author/")
|
||
pub = strings.Trim(pub, "/")
|
||
if pub == "" {
|
||
jsonErr(w, fmt.Errorf("author pub required"), 400)
|
||
return
|
||
}
|
||
limit := queryInt(r, "limit", 30)
|
||
beforeTs := queryInt64(r, "before", 0) // pagination cursor (unix seconds)
|
||
|
||
// Prefer chain-authoritative list (includes soft-deleted flag) so
|
||
// clients can't be fooled by a stale relay that has an already-
|
||
// deleted post. If chain isn't wired, fall back to relay index.
|
||
if cfg.PostsByAuthor != nil {
|
||
records, err := cfg.PostsByAuthor(pub, beforeTs, limit)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
out := make([]feedAuthorItem, 0, len(records))
|
||
for _, rec := range records {
|
||
if rec == nil || rec.Deleted {
|
||
continue
|
||
}
|
||
out = append(out, buildAuthorItem(cfg, rec))
|
||
}
|
||
jsonOK(w, map[string]any{"author": pub, "count": len(out), "posts": out})
|
||
return
|
||
}
|
||
// Fallback: relay index (no chain). Doesn't support `before` yet;
|
||
// the chain-authoritative path above is what production serves.
|
||
ids, err := cfg.Mailbox.PostsByAuthor(pub, limit)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
out := expandByID(cfg, ids)
|
||
jsonOK(w, map[string]any{"author": pub, "count": len(out), "posts": out})
|
||
}
|
||
}
|
||
|
||
// feedAuthorItem is a chain record enriched with the body and live stats.
// It is the per-post shape returned by /feed/author, /feed/timeline,
// /feed/trending, /feed/foryou and /feed/hashtag.
type feedAuthorItem struct {
	PostID        string   `json:"post_id"`
	Author        string   `json:"author"`
	Content       string   `json:"content,omitempty"`      // empty when the body is not on this relay
	ContentType   string   `json:"content_type,omitempty"`
	Hashtags      []string `json:"hashtags,omitempty"`
	ReplyTo       string   `json:"reply_to,omitempty"`
	QuoteOf       string   `json:"quote_of,omitempty"`
	CreatedAt     int64    `json:"created_at"`
	Size          uint64   `json:"size"`
	HostingRelay  string   `json:"hosting_relay"`
	Views         uint64   `json:"views"`          // off-chain counter
	Likes         uint64   `json:"likes"`          // on-chain counter; 0 when unwired
	HasAttachment bool     `json:"has_attachment"` // fetch via /feed/post/{id}/attachment
}
|
||
|
||
func buildAuthorItem(cfg FeedConfig, rec *blockchain.PostRecord) feedAuthorItem {
|
||
item := feedAuthorItem{
|
||
PostID: rec.PostID,
|
||
Author: rec.Author,
|
||
ReplyTo: rec.ReplyTo,
|
||
QuoteOf: rec.QuoteOf,
|
||
CreatedAt: rec.CreatedAt,
|
||
Size: rec.Size,
|
||
HostingRelay: rec.HostingRelay,
|
||
}
|
||
if body, _ := cfg.Mailbox.Get(rec.PostID); body != nil {
|
||
item.Content = body.Content
|
||
item.ContentType = body.ContentType
|
||
item.Hashtags = body.Hashtags
|
||
item.HasAttachment = len(body.Attachment) > 0
|
||
}
|
||
if cfg.LikeCount != nil {
|
||
item.Likes, _ = cfg.LikeCount(rec.PostID)
|
||
}
|
||
item.Views, _ = cfg.Mailbox.ViewCount(rec.PostID)
|
||
return item
|
||
}
|
||
|
||
// expandByID fetches bodies+stats for a list of post IDs (no chain record).
|
||
func expandByID(cfg FeedConfig, ids []string) []feedAuthorItem {
|
||
out := make([]feedAuthorItem, 0, len(ids))
|
||
for _, id := range ids {
|
||
body, _ := cfg.Mailbox.Get(id)
|
||
if body == nil {
|
||
continue
|
||
}
|
||
item := feedAuthorItem{
|
||
PostID: id,
|
||
Author: body.Author,
|
||
Content: body.Content,
|
||
ContentType: body.ContentType,
|
||
Hashtags: body.Hashtags,
|
||
ReplyTo: body.ReplyTo,
|
||
QuoteOf: body.QuoteOf,
|
||
CreatedAt: body.CreatedAt,
|
||
HasAttachment: len(body.Attachment) > 0,
|
||
}
|
||
if cfg.LikeCount != nil {
|
||
item.Likes, _ = cfg.LikeCount(id)
|
||
}
|
||
item.Views, _ = cfg.Mailbox.ViewCount(id)
|
||
out = append(out, item)
|
||
}
|
||
return out
|
||
}
|
||
|
||
// ── GET /feed/timeline ────────────────────────────────────────────────────
|
||
|
||
func feedTimeline(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
follower := r.URL.Query().Get("follower")
|
||
if follower == "" {
|
||
jsonErr(w, fmt.Errorf("follower parameter required"), 400)
|
||
return
|
||
}
|
||
if cfg.Following == nil || cfg.PostsByAuthor == nil {
|
||
jsonErr(w, fmt.Errorf("timeline requires chain queries"), 503)
|
||
return
|
||
}
|
||
limit := queryInt(r, "limit", 30)
|
||
beforeTs := queryInt64(r, "before", 0) // pagination cursor
|
||
perAuthor := limit
|
||
if perAuthor > 30 {
|
||
perAuthor = 30
|
||
}
|
||
|
||
following, err := cfg.Following(follower)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
var merged []*blockchain.PostRecord
|
||
for _, target := range following {
|
||
posts, err := cfg.PostsByAuthor(target, beforeTs, perAuthor)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
for _, p := range posts {
|
||
if p != nil && !p.Deleted {
|
||
merged = append(merged, p)
|
||
}
|
||
}
|
||
}
|
||
// Sort newest-first, take top N.
|
||
sort.Slice(merged, func(i, j int) bool { return merged[i].CreatedAt > merged[j].CreatedAt })
|
||
if len(merged) > limit {
|
||
merged = merged[:limit]
|
||
}
|
||
out := make([]feedAuthorItem, 0, len(merged))
|
||
for _, rec := range merged {
|
||
out = append(out, buildAuthorItem(cfg, rec))
|
||
}
|
||
jsonOK(w, map[string]any{"count": len(out), "posts": out})
|
||
}
|
||
}
|
||
|
||
// ── GET /feed/trending ────────────────────────────────────────────────────
|
||
|
||
func feedTrending(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
limit := queryInt(r, "limit", 30)
|
||
// Window defaults to 24h; cap 7d so a viral post from a week ago
|
||
// doesn't permanently dominate.
|
||
windowHours := queryInt(r, "window", 24)
|
||
if windowHours > 24*7 {
|
||
windowHours = 24 * 7
|
||
}
|
||
if windowHours < 1 {
|
||
windowHours = 1
|
||
}
|
||
ids, err := cfg.Mailbox.RecentPostIDs(int64(windowHours)*3600, 500)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
// Score each = likes*3 + views, honoring soft-delete.
|
||
type scored struct {
|
||
id string
|
||
score uint64
|
||
}
|
||
scoredList := make([]scored, 0, len(ids))
|
||
for _, id := range ids {
|
||
if cfg.GetPost != nil {
|
||
if rec, _ := cfg.GetPost(id); rec != nil && rec.Deleted {
|
||
continue
|
||
}
|
||
}
|
||
views, _ := cfg.Mailbox.ViewCount(id)
|
||
var likes uint64
|
||
if cfg.LikeCount != nil {
|
||
likes, _ = cfg.LikeCount(id)
|
||
}
|
||
scoredList = append(scoredList, scored{id: id, score: likes*3 + views})
|
||
}
|
||
sort.Slice(scoredList, func(i, j int) bool { return scoredList[i].score > scoredList[j].score })
|
||
if len(scoredList) > limit {
|
||
scoredList = scoredList[:limit]
|
||
}
|
||
pickedIDs := make([]string, len(scoredList))
|
||
for i, s := range scoredList {
|
||
pickedIDs[i] = s.id
|
||
}
|
||
out := expandByID(cfg, pickedIDs)
|
||
jsonOK(w, map[string]any{"count": len(out), "posts": out})
|
||
}
|
||
}
|
||
|
||
// ── GET /feed/foryou ──────────────────────────────────────────────────────
|
||
//
|
||
// Simple recommendations heuristic for v2.0.0:
|
||
// 1. Compute the set of authors the user already follows.
|
||
// 2. Fetch recent posts from the relay (last 48h).
|
||
// 3. Filter OUT posts from followed authors (those live in /timeline).
|
||
// 4. Filter OUT posts the user has already liked.
|
||
// 5. Rank remaining by (likes × 3 + views) and return top N.
|
||
//
|
||
// Future improvements (tracked as v2.2.0 "Feed algorithm"):
|
||
// - Weight by "followed-of-followed" signal (friends-of-friends boost).
|
||
// - Decay by age (exp half-life ~12h).
|
||
// - Penalise self-engagement (author liking own post).
|
||
// - Collaborative filtering on hashtag co-occurrence.
|
||
|
||
func feedForYou(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
pub := r.URL.Query().Get("pub")
|
||
limit := queryInt(r, "limit", 30)
|
||
|
||
// Gather user's follows + likes to exclude from the candidate pool.
|
||
excludedAuthors := make(map[string]struct{})
|
||
if cfg.Following != nil && pub != "" {
|
||
if list, err := cfg.Following(pub); err == nil {
|
||
for _, a := range list {
|
||
excludedAuthors[a] = struct{}{}
|
||
}
|
||
}
|
||
}
|
||
// Post pool: last 48h on this relay.
|
||
ids, err := cfg.Mailbox.RecentPostIDs(48*3600, 500)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
type scored struct {
|
||
id string
|
||
score uint64
|
||
}
|
||
scoredList := make([]scored, 0, len(ids))
|
||
for _, id := range ids {
|
||
body, _ := cfg.Mailbox.Get(id)
|
||
if body == nil {
|
||
continue
|
||
}
|
||
if _, followed := excludedAuthors[body.Author]; followed {
|
||
continue
|
||
}
|
||
if body.Author == pub {
|
||
continue // don't recommend user's own posts
|
||
}
|
||
if cfg.GetPost != nil {
|
||
if rec, _ := cfg.GetPost(id); rec != nil && rec.Deleted {
|
||
continue
|
||
}
|
||
}
|
||
// Skip already-liked.
|
||
if cfg.HasLiked != nil && pub != "" {
|
||
if liked, _ := cfg.HasLiked(id, pub); liked {
|
||
continue
|
||
}
|
||
}
|
||
views, _ := cfg.Mailbox.ViewCount(id)
|
||
var likes uint64
|
||
if cfg.LikeCount != nil {
|
||
likes, _ = cfg.LikeCount(id)
|
||
}
|
||
// Small "seed" score so posts with no engagement still get shown
|
||
// sometimes (otherwise a silent but fresh post can't break in).
|
||
scoredList = append(scoredList, scored{id: id, score: likes*3 + views + 1})
|
||
}
|
||
sort.Slice(scoredList, func(i, j int) bool { return scoredList[i].score > scoredList[j].score })
|
||
if len(scoredList) > limit {
|
||
scoredList = scoredList[:limit]
|
||
}
|
||
pickedIDs := make([]string, len(scoredList))
|
||
for i, s := range scoredList {
|
||
pickedIDs[i] = s.id
|
||
}
|
||
out := expandByID(cfg, pickedIDs)
|
||
jsonOK(w, map[string]any{"count": len(out), "posts": out})
|
||
}
|
||
}
|
||
|
||
// ── GET /feed/hashtag/{tag} ──────────────────────────────────────────────
|
||
|
||
func feedHashtag(cfg FeedConfig) http.HandlerFunc {
|
||
return func(w http.ResponseWriter, r *http.Request) {
|
||
if r.Method != http.MethodGet {
|
||
jsonErr(w, fmt.Errorf("method not allowed"), 405)
|
||
return
|
||
}
|
||
tag := strings.TrimPrefix(r.URL.Path, "/feed/hashtag/")
|
||
tag = strings.Trim(tag, "/")
|
||
if tag == "" {
|
||
jsonErr(w, fmt.Errorf("tag required"), 400)
|
||
return
|
||
}
|
||
limit := queryInt(r, "limit", 50)
|
||
ids, err := cfg.Mailbox.PostsByHashtag(tag, limit)
|
||
if err != nil {
|
||
jsonErr(w, err, 500)
|
||
return
|
||
}
|
||
out := expandByID(cfg, ids)
|
||
jsonOK(w, map[string]any{"tag": strings.ToLower(tag), "count": len(out), "posts": out})
|
||
}
|
||
}
|
||
|
||
// (queryInt helper is shared with the rest of the node HTTP surface;
|
||
// see api_common.go.)
|