Files
dchain/node/feed_e2e_test.go
vsecoder a75cbcd224 feat: resource caps, Saved Messages, author walls, docs for node bring-up
Node flags (cmd/node/main.go):
  --max-cpu / --max-ram-mb — Go runtime caps (GOMAXPROCS / GOMEMLIMIT)
  --feed-disk-limit-mb — hard 507 refusal for new post bodies over quota
  --chain-disk-limit-mb — advisory watcher (can't reject blocks without
  breaking consensus; logs WARN every minute)

Client — Saved Messages (self-chat):
  - Auto-created on sign-in, pinned top of chat list, blue bookmark avatar
  - Send short-circuits the relay (no encrypt, no fee, no mailbox hop)
  - Empty state rendered outside inverted FlatList — fixes the mirrored
    "say hi…" on Android RTL-aware layout builds
  - PostCard shows "You" for own posts instead of the self-contact alias

Client — user walls:
  - New route /(app)/feed/author/[pub] with infinite-scroll via
    `created_at` cursor and pull-to-refresh
  - Profile screen gains "View posts" button (universal) next to
    "Open chat" (contact-only)

Feed pipeline:
  - Bump client JPEG quality 0.5 → 0.75 to match server scrubber (Q=75),
    so a 60 KiB compose doesn't balloon past 256 KiB after server re-encode
  - ErrPostTooLarge now wraps with the actual size vs cap, errors.Is
    preserved in the HTTP layer
  - FeedMailbox quota + DiskUsage surface — supports new CLI flag

README:
  - Step-by-step "first node / joiner" section on the landing page,
    full flag tables incl. the new resource-cap group, minimal
    checklists for open/private/low-end deployments
2026-04-19 13:14:47 +03:00

832 lines
26 KiB
Go
Raw Permalink Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// End-to-end integration tests for the social feed (v2.0.0).
//
// These tests exercise the full HTTP surface against a real in-process
// setup: a BadgerDB chain, a BadgerDB feed-mailbox, the media scrubber,
// and a net/http ServeMux with all feed routes wired. Requests hit the
// real handlers (including rate-limiters, auth, and scrubber) so we
// catch wire-level regressions that unit tests miss.
//
// Layout of a typical test:
//
// h := newFeedHarness(t)
// defer h.Close()
// author := h.newUser("alice")
// h.fund(author, 1_000_000) // give them tokens
// resp := h.publish(author, "Hello #world", nil) // POST /feed/publish
// h.commitCreatePost(author, resp) // chain tx
// got := h.getPost(resp.PostID)
// ...
package node
import (
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"image"
"image/color"
"image/jpeg"
"io"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"go-blockchain/blockchain"
"go-blockchain/identity"
"go-blockchain/media"
"go-blockchain/relay"
)
// ── Harness ──────────────────────────────────────────────────────────────
// feedHarness bundles everything one e2e test needs: a real BadgerDB
// chain, a real feed mailbox, the media scrubber, and an httptest
// server with every feed route registered. One harness per test.
type feedHarness struct {
	t         *testing.T
	chainDir  string // temp dir backing the chain DB; removed in Close
	feedDir   string // temp dir backing the feed mailbox; removed in Close
	chain     *blockchain.Chain
	mailbox   *relay.FeedMailbox
	scrubber  *media.Scrubber
	server    *httptest.Server
	validator *identity.Identity // genesis validator: signs blocks and acts as hosting relay
	tip       *blockchain.Block  // most recently appended block; advanced by commit()
}
// newFeedHarness builds a fully wired feed stack: temp dirs, chain with
// a funded genesis block, feed mailbox, scrubber, and an httptest server
// serving all feed routes. Cleanup is registered via t.Cleanup, so a
// `defer h.Close()` in the test is optional (Close is idempotent).
func newFeedHarness(t *testing.T) *feedHarness {
	t.Helper()
	chainDir, err := os.MkdirTemp("", "dchain-e2e-chain-*")
	if err != nil {
		t.Fatalf("MkdirTemp chain: %v", err)
	}
	feedDir, err := os.MkdirTemp("", "dchain-e2e-feed-*")
	if err != nil {
		t.Fatalf("MkdirTemp feed: %v", err)
	}
	// Register cleanup BEFORE any fallible setup step. Previously the
	// cleanup was registered last, so a t.Fatalf below leaked the temp
	// dirs and any already-opened DB handles. Close tolerates nil fields,
	// so registering it on a partially built harness is safe.
	h := &feedHarness{t: t, chainDir: chainDir, feedDir: feedDir}
	t.Cleanup(h.Close)
	c, err := blockchain.NewChain(chainDir)
	if err != nil {
		t.Fatalf("NewChain: %v", err)
	}
	h.chain = c
	fm, err := relay.OpenFeedMailbox(feedDir, 24*time.Hour, 0)
	if err != nil {
		t.Fatalf("OpenFeedMailbox: %v", err)
	}
	h.mailbox = fm
	validator, err := identity.Generate()
	if err != nil {
		t.Fatalf("identity.Generate: %v", err)
	}
	h.validator = validator
	// Bootstrap a genesis block so the validator has funds to disburse.
	genesis := blockchain.GenesisBlock(validator.PubKeyHex(), validator.PrivKey)
	if err := c.AddBlock(genesis); err != nil {
		t.Fatalf("AddBlock genesis: %v", err)
	}
	h.tip = genesis
	h.scrubber = media.NewScrubber(media.SidecarConfig{}) // no sidecar — images only
	cfg := FeedConfig{
		Mailbox:              fm,
		HostingRelayPub:      validator.PubKeyHex(),
		Scrubber:             h.scrubber,
		AllowUnscrubbedVideo: false,
		GetPost:              c.Post,
		LikeCount:            c.LikeCount,
		HasLiked:             c.HasLiked,
		PostsByAuthor:        c.PostsByAuthor,
		Following:            c.Following,
	}
	mux := http.NewServeMux()
	RegisterFeedRoutes(mux, cfg)
	h.server = httptest.NewServer(mux)
	return h
}
// Close releases all handles and removes the temp directories. Safe to
// call multiple times: each field is nil'd after its first release.
func (h *feedHarness) Close() {
	if h.server != nil {
		h.server.Close()
		h.server = nil
	}
	if h.mailbox != nil {
		_ = h.mailbox.Close() // best-effort: nothing actionable on teardown
		h.mailbox = nil
	}
	if h.chain != nil {
		_ = h.chain.Close() // best-effort: nothing actionable on teardown
		h.chain = nil
	}
	// Retry because Windows holds mmap files briefly after Close.
	// Previously a persistent failure was swallowed silently; now the
	// final error is logged so disk-bloat in CI is at least visible.
	for _, dir := range []string{h.chainDir, h.feedDir} {
		var err error
		for i := 0; i < 20; i++ {
			if err = os.RemoveAll(dir); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			h.t.Logf("feedHarness: temp dir %s not removed: %v", dir, err)
		}
	}
}
// newUser mints a fresh identity for the test. The label only appears
// in failure messages. The account starts empty — fund() it if needed.
func (h *feedHarness) newUser(label string) *identity.Identity {
	h.t.Helper()
	user, err := identity.Generate()
	if err != nil {
		h.t.Fatalf("%s identity: %v", label, err)
	}
	return user
}
// fund transfers `amount` µT from the genesis validator to `target`,
// committed immediately in a block of its own.
func (h *feedHarness) fund(target *identity.Identity, amount uint64) {
	h.t.Helper()
	from := h.validator.PubKeyHex()
	h.commit(&blockchain.Transaction{
		ID:        h.nextTxID(from, blockchain.EventTransfer),
		Type:      blockchain.EventTransfer,
		From:      from,
		To:        target.PubKeyHex(),
		Amount:    amount,
		Fee:       blockchain.MinFee,
		Timestamp: time.Now().UTC(),
	})
}
// commit bundles the given txs into a single block on top of the current
// tip, signs it with the harness validator, and appends it to the chain.
func (h *feedHarness) commit(txs ...*blockchain.Transaction) {
	h.t.Helper()
	// Brief pause so nextTxID's nanosecond input differs between calls.
	time.Sleep(2 * time.Millisecond)
	fees := uint64(0)
	for _, tx := range txs {
		fees += tx.Fee
	}
	next := &blockchain.Block{
		Index:        h.tip.Index + 1,
		Timestamp:    time.Now().UTC(),
		Transactions: txs,
		PrevHash:     h.tip.Hash,
		Validator:    h.validator.PubKeyHex(),
		TotalFees:    fees,
	}
	next.ComputeHash()
	next.Sign(h.validator.PrivKey)
	if err := h.chain.AddBlock(next); err != nil {
		h.t.Fatalf("AddBlock: %v", err)
	}
	h.tip = next
}
// nextTxID derives a 32-hex-char tx ID unique per (sender, event type,
// nanosecond timestamp) — a truncated sha256 over that triple.
func (h *feedHarness) nextTxID(from string, typ blockchain.EventType) string {
	seed := fmt.Sprintf("%s:%s:%d", from, typ, time.Now().UnixNano())
	digest := sha256.Sum256([]byte(seed))
	return hex.EncodeToString(digest[:16])
}
// publish POSTs /feed/publish as `author` with a signed request body. On
// success returns the server's response so the caller can commit the
// on-chain CREATE_POST with matching metadata.
//
// The attachment, if any, is base64-encoded and always labeled
// image/jpeg — tests that need other MIMEs build the request by hand.
func (h *feedHarness) publish(author *identity.Identity, content string, attachment []byte) feedPublishResponse {
	h.t.Helper()
	attachB64 := ""
	attachMIME := ""
	if len(attachment) > 0 {
		attachB64 = base64.StdEncoding.EncodeToString(attachment)
		attachMIME = "image/jpeg"
	}
	// Client-side hash matches the server's canonical bytes rule:
	// publish:<post_id>:<sha256(content||attachment) hex>:<ts>
	// The client knows its own attachment before any server-side scrub,
	// so this is the hash over the "raw upload". The server recomputes
	// over SCRUBBED bytes and returns that as content_hash — the client
	// then uses the server's number for CREATE_POST.
	//
	// postID is derived from (author, now_nanos, content) so repeated
	// publishes in one test never collide.
	idHash := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%s",
		author.PubKeyHex(), time.Now().UnixNano(), content)))
	postID := hex.EncodeToString(idHash[:16])
	// Build signature over CLIENT-side hash (content bytes then raw
	// attachment bytes, in that order).
	h256 := sha256.New()
	h256.Write([]byte(content))
	h256.Write(attachment)
	clientHash := hex.EncodeToString(h256.Sum(nil))
	ts := time.Now().Unix()
	sigBytes := author.Sign([]byte(fmt.Sprintf("publish:%s:%s:%d", postID, clientHash, ts)))
	req := feedPublishRequest{
		PostID:         postID,
		Author:         author.PubKeyHex(),
		Content:        content,
		AttachmentB64:  attachB64,
		AttachmentMIME: attachMIME,
		Sig:            base64.StdEncoding.EncodeToString(sigBytes),
		Ts:             ts,
	}
	var resp feedPublishResponse
	h.postJSON("/feed/publish", req, &resp)
	return resp
}
// commitCreatePost sends the on-chain CREATE_POST tx that pays the
// hosting relay (this node's validator in the harness). Must be called
// after publish() so the two agree on the content hash and size —
// the payload is built entirely from the server's publish response.
func (h *feedHarness) commitCreatePost(author *identity.Identity, pub feedPublishResponse) {
	h.t.Helper()
	// Server returns the SCRUBBED-bytes hash as hex; chain payload
	// carries the raw bytes.
	contentHash, err := hex.DecodeString(pub.ContentHash)
	if err != nil {
		h.t.Fatalf("decode content hash: %v", err)
	}
	payload := blockchain.CreatePostPayload{
		PostID:       pub.PostID,
		ContentHash:  contentHash,
		Size:         pub.Size,
		HostingRelay: pub.HostingRelay,
	}
	// Marshal of a plain struct with no unsupported types; error
	// intentionally ignored in test code.
	pbytes, _ := json.Marshal(payload)
	tx := &blockchain.Transaction{
		ID:        h.nextTxID(author.PubKeyHex(), blockchain.EventCreatePost),
		Type:      blockchain.EventCreatePost,
		From:      author.PubKeyHex(),
		Amount:    0,
		Fee:       pub.EstimatedFeeUT, // fee quoted by the server at publish time
		Payload:   pbytes,
		Timestamp: time.Now().UTC(),
	}
	h.commit(tx)
}
// like / unlike / follow / unfollow helpers — all just small tx builders.
// like commits an on-chain LIKE_POST from `liker` for postID.
func (h *feedHarness) like(liker *identity.Identity, postID string) {
	h.t.Helper() // added for consistency with fund/commit: failures report the caller's line
	payload, _ := json.Marshal(blockchain.LikePostPayload{PostID: postID})
	tx := &blockchain.Transaction{
		ID:        h.nextTxID(liker.PubKeyHex(), blockchain.EventLikePost),
		Type:      blockchain.EventLikePost,
		From:      liker.PubKeyHex(),
		Fee:       blockchain.MinFee,
		Payload:   payload,
		Timestamp: time.Now().UTC(),
	}
	h.commit(tx)
}
// follow commits an on-chain FOLLOW edge from `follower` to the `target`
// pubkey (hex).
func (h *feedHarness) follow(follower *identity.Identity, target string) {
	h.t.Helper() // added for consistency with fund/commit: failures report the caller's line
	tx := &blockchain.Transaction{
		ID:        h.nextTxID(follower.PubKeyHex(), blockchain.EventFollow),
		Type:      blockchain.EventFollow,
		From:      follower.PubKeyHex(),
		To:        target,
		Fee:       blockchain.MinFee,
		Payload:   []byte(`{}`), // FOLLOW carries no payload fields; target rides in To
		Timestamp: time.Now().UTC(),
	}
	h.commit(tx)
}
// deletePost commits an on-chain EventDeletePost for the given post,
// signed by the author.
func (h *feedHarness) deletePost(author *identity.Identity, postID string) {
	h.t.Helper() // added for consistency with fund/commit: failures report the caller's line
	payload, _ := json.Marshal(blockchain.DeletePostPayload{PostID: postID})
	tx := &blockchain.Transaction{
		ID:        h.nextTxID(author.PubKeyHex(), blockchain.EventDeletePost),
		Type:      blockchain.EventDeletePost,
		From:      author.PubKeyHex(),
		Fee:       blockchain.MinFee,
		Payload:   payload,
		Timestamp: time.Now().UTC(),
	}
	h.commit(tx)
}
// ── HTTP helpers ──────────────────────────────────────────────────────────
// postJSON marshals req, POSTs it to path on the harness server, and
// decodes the JSON reply into out (skipped when out is nil). Any
// transport error or status ≥ 400 fails the test immediately.
func (h *feedHarness) postJSON(path string, req any, out any) {
	h.t.Helper()
	payload, _ := json.Marshal(req)
	resp, err := http.Post(h.server.URL+path, "application/json", bytes.NewReader(payload))
	if err != nil {
		h.t.Fatalf("POST %s: %v", path, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		raw, _ := io.ReadAll(resp.Body)
		h.t.Fatalf("POST %s → %d: %s", path, resp.StatusCode, string(raw))
	}
	if out == nil {
		return
	}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		h.t.Fatalf("decode %s response: %v", path, err)
	}
}
// postJSONExpectStatus POSTs req to path and asserts the exact response
// status `want`, returning the raw response body for further checks.
// Unlike postJSON, a non-2xx status is not automatically fatal.
func (h *feedHarness) postJSONExpectStatus(path string, req any, want int) string {
	h.t.Helper()
	payload, _ := json.Marshal(req)
	resp, err := http.Post(h.server.URL+path, "application/json", bytes.NewReader(payload))
	if err != nil {
		h.t.Fatalf("POST %s: %v", path, err)
	}
	defer resp.Body.Close()
	raw, _ := io.ReadAll(resp.Body)
	if got := resp.StatusCode; got != want {
		h.t.Fatalf("POST %s → %d, want %d: %s", path, got, want, string(raw))
	}
	return string(raw)
}
// getJSON GETs path on the harness server and decodes the JSON reply
// into out (skipped when out is nil). Transport errors and status ≥ 400
// fail the test immediately.
func (h *feedHarness) getJSON(path string, out any) {
	h.t.Helper()
	resp, err := http.Get(h.server.URL + path)
	if err != nil {
		h.t.Fatalf("GET %s: %v", path, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		raw, _ := io.ReadAll(resp.Body)
		h.t.Fatalf("GET %s → %d: %s", path, resp.StatusCode, string(raw))
	}
	if out == nil {
		return
	}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		h.t.Fatalf("decode %s response: %v", path, err)
	}
}
// getStatus fetches path and returns status + body; doesn't fail on
// non-2xx (only on a transport-level error).
func (h *feedHarness) getStatus(path string) (int, string) {
	h.t.Helper() // was missing: Fatalf below should point at the caller
	resp, err := http.Get(h.server.URL + path)
	if err != nil {
		h.t.Fatalf("GET %s: %v", path, err)
	}
	defer resp.Body.Close()
	raw, _ := io.ReadAll(resp.Body)
	return resp.StatusCode, string(raw)
}
// postRaw POSTs to path with an empty body — for endpoints like
// /feed/post/{id}/view that take no request payload — and decodes the
// JSON reply into out when out is non-nil. Fails the test on transport
// errors and status ≥ 400.
func (h *feedHarness) postRaw(path string, out any) {
	h.t.Helper()
	resp, err := http.Post(h.server.URL+path, "application/json", nil)
	if err != nil {
		h.t.Fatalf("POST %s: %v", path, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		raw, _ := io.ReadAll(resp.Body)
		h.t.Fatalf("POST %s → %d: %s", path, resp.StatusCode, string(raw))
	}
	if out == nil {
		return
	}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		h.t.Fatalf("decode %s response: %v", path, err)
	}
}
// ── Tests ─────────────────────────────────────────────────────────────────
// TestE2EFullFlow runs the whole publish → commit → read cycle end-to-end.
//
// Covers: /feed/publish signature, /feed/post/{id} body fetch, /feed/post/{id}/stats,
// /feed/post/{id}/view counter, CREATE_POST fee debit to author + credit to
// hosting relay, PostsByAuthor enrichment, DELETE soft-delete → 410.
func TestE2EFullFlow(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	h.fund(alice, 10*blockchain.Token)
	// Balance error deliberately ignored in test code; the validator
	// account exists from genesis.
	hostBalBefore, _ := h.chain.Balance(h.validator.PubKeyHex())
	// 1. PUBLISH → body lands in feed mailbox.
	pub := h.publish(alice, "Hello from the feed #dchain #intro", nil)
	if pub.PostID == "" || pub.ContentHash == "" {
		t.Fatalf("publish response missing required fields: %+v", pub)
	}
	if pub.HostingRelay != h.validator.PubKeyHex() {
		t.Errorf("hosting_relay: got %s, want %s", pub.HostingRelay, h.validator.PubKeyHex())
	}
	// Server extracts hashtags from the content at publish time.
	wantTags := []string{"dchain", "intro"}
	if len(pub.Hashtags) != len(wantTags) {
		t.Errorf("hashtags: got %v, want %v", pub.Hashtags, wantTags)
	}
	// Before the CREATE_POST tx lands the body is available but /stats
	// says 0 likes. That's the expected "just published, not committed" state.
	// 2. COMMIT on-chain CREATE_POST tx.
	h.commitCreatePost(alice, pub)
	// Hosting relay should have been credited tx.Fee.
	hostBalAfter, _ := h.chain.Balance(h.validator.PubKeyHex())
	if hostBalAfter <= hostBalBefore {
		t.Errorf("hosting relay balance did not increase after CREATE_POST: %d → %d",
			hostBalBefore, hostBalAfter)
	}
	// 3. READ via HTTP — body comes back.
	var got struct {
		PostID  string `json:"post_id"`
		Author  string `json:"author"`
		Content string `json:"content"`
	}
	h.getJSON("/feed/post/"+pub.PostID, &got)
	if got.Content != "Hello from the feed #dchain #intro" {
		t.Errorf("content: got %q, want original", got.Content)
	}
	if got.Author != alice.PubKeyHex() {
		t.Errorf("author: got %s, want %s", got.Author, alice.PubKeyHex())
	}
	// 4. VIEW COUNTER increments — each POST /view returns the new total.
	var viewResp struct {
		Views uint64 `json:"views"`
	}
	for i := 1; i <= 3; i++ {
		h.postRaw("/feed/post/"+pub.PostID+"/view", &viewResp)
		if viewResp.Views != uint64(i) {
			t.Errorf("views #%d: got %d, want %d", i, viewResp.Views, i)
		}
	}
	// 5. STATS aggregate is correct.
	var stats postStatsResponse
	h.getJSON("/feed/post/"+pub.PostID+"/stats", &stats)
	if stats.Views != 3 {
		t.Errorf("stats.views: got %d, want 3", stats.Views)
	}
	if stats.Likes != 0 {
		t.Errorf("stats.likes: got %d, want 0", stats.Likes)
	}
	// 6. AUTHOR listing merges chain record + body + stats.
	var authorResp struct {
		Count int              `json:"count"`
		Posts []feedAuthorItem `json:"posts"`
	}
	h.getJSON("/feed/author/"+alice.PubKeyHex(), &authorResp)
	if authorResp.Count != 1 {
		t.Fatalf("author count: got %d, want 1", authorResp.Count)
	}
	if authorResp.Posts[0].Views != 3 {
		t.Errorf("author post views: got %d, want 3", authorResp.Posts[0].Views)
	}
	if len(authorResp.Posts[0].Hashtags) != 2 {
		t.Errorf("author post hashtags: got %v, want 2", authorResp.Posts[0].Hashtags)
	}
	// 7. DELETE → body stays in mailbox but chain marks deleted → 410 on fetch.
	h.deletePost(alice, pub.PostID)
	status, body := h.getStatus("/feed/post/" + pub.PostID)
	if status != http.StatusGone {
		t.Errorf("GET deleted post: got status %d, want 410; body: %s", status, body)
	}
}
// TestE2ELikeUnlikeAffectsStats: an on-chain LIKE_POST is visible via
// /stats, and the liked_by_me flag is correct per requesting viewer.
func TestE2ELikeUnlikeAffectsStats(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	bob := h.newUser("bob")
	for _, u := range []*identity.Identity{alice, bob} {
		h.fund(u, 10*blockchain.Token)
	}
	pub := h.publish(alice, "likeable", nil)
	h.commitCreatePost(alice, pub)

	// Bob likes alice's post; his view of /stats reflects it.
	h.like(bob, pub.PostID)
	var got postStatsResponse
	h.getJSON("/feed/post/"+pub.PostID+"/stats?me="+bob.PubKeyHex(), &got)
	if got.Likes != 1 {
		t.Errorf("likes after like: got %d, want 1", got.Likes)
	}
	if got.LikedByMe == nil || !*got.LikedByMe {
		t.Errorf("liked_by_me: got %v, want true", got.LikedByMe)
	}

	// A viewer who never liked the post must see liked_by_me=false.
	carol := h.newUser("carol")
	h.getJSON("/feed/post/"+pub.PostID+"/stats?me="+carol.PubKeyHex(), &got)
	if got.LikedByMe == nil || *got.LikedByMe {
		t.Errorf("liked_by_me for carol: got %v, want false", got.LikedByMe)
	}
}
// TestE2ETimeline: follow graph merges posts newest-first.
func TestE2ETimeline(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	bob := h.newUser("bob")
	carol := h.newUser("carol")
	// Fund everyone.
	for _, u := range []*identity.Identity{alice, bob, carol} {
		h.fund(u, 10*blockchain.Token)
	}
	// Alice follows bob + carol — her timeline should merge both feeds.
	h.follow(alice, bob.PubKeyHex())
	h.follow(alice, carol.PubKeyHex())
	// Bob + carol each publish a post. Sleep 1.1s between so the tx
	// timestamps land in distinct unix seconds — the chain chrono index
	// is second-resolution, not millisecond.
	pubBob := h.publish(bob, "post from bob", nil)
	h.commitCreatePost(bob, pubBob)
	time.Sleep(1100 * time.Millisecond)
	pubCarol := h.publish(carol, "post from carol", nil)
	h.commitCreatePost(carol, pubCarol)
	var tl struct {
		Count int              `json:"count"`
		Posts []feedAuthorItem `json:"posts"`
	}
	h.getJSON("/feed/timeline?follower="+alice.PubKeyHex(), &tl)
	if tl.Count != 2 {
		t.Fatalf("timeline count: got %d, want 2", tl.Count)
	}
	// Newest first — carol was published last, so her post should be [0].
	if tl.Posts[0].PostID != pubCarol.PostID {
		t.Errorf("timeline[0]: got %s, want carol's post %s", tl.Posts[0].PostID, pubCarol.PostID)
	}
	if tl.Posts[1].PostID != pubBob.PostID {
		t.Errorf("timeline[1]: got %s, want bob's post %s", tl.Posts[1].PostID, pubBob.PostID)
	}
}
// TestE2ETrendingRanking: post with more engagement floats to the top.
func TestE2ETrendingRanking(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	bob := h.newUser("bob")
	carol := h.newUser("carol")
	for _, u := range []*identity.Identity{alice, bob, carol} {
		h.fund(u, 10*blockchain.Token)
	}
	// Two posts by the same author so ranking can't hinge on author identity.
	lowPost := h.publish(alice, "low-engagement post", nil)
	h.commitCreatePost(alice, lowPost)
	hotPost := h.publish(alice, "hot post", nil)
	h.commitCreatePost(alice, hotPost)
	// Hot post gets 2 likes + 5 views; low post stays at 0.
	h.like(bob, hotPost.PostID)
	h.like(carol, hotPost.PostID)
	var viewResp struct{ Views uint64 }
	for i := 0; i < 5; i++ {
		h.postRaw("/feed/post/"+hotPost.PostID+"/view", &viewResp)
	}
	var tr struct {
		Count int              `json:"count"`
		Posts []feedAuthorItem `json:"posts"`
	}
	h.getJSON("/feed/trending?limit=10", &tr)
	if tr.Count < 2 {
		t.Fatalf("trending: got %d posts, want ≥2", tr.Count)
	}
	// Hot post MUST be first (likes × 3 + views = 11 vs 0 — see the
	// trending score formula in the feed handler).
	if tr.Posts[0].PostID != hotPost.PostID {
		t.Errorf("trending[0]: got %s, want hot post %s", tr.Posts[0].PostID, hotPost.PostID)
	}
}
// TestE2EForYouFilters: recommendations exclude followed authors,
// already-liked posts, and the user's own posts.
func TestE2EForYouFilters(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice") // asking for recs
	bob := h.newUser("bob")     // alice follows bob → bob's posts excluded
	carol := h.newUser("carol") // stranger → should surface
	dave := h.newUser("dave")   // post liked by alice → excluded
	for _, u := range []*identity.Identity{alice, bob, carol, dave} {
		h.fund(u, 10*blockchain.Token)
	}
	// Alice follows bob.
	h.follow(alice, bob.PubKeyHex())
	// Each non-alice user publishes a post, plus alice herself.
	postOwn := h.publish(alice, "my own post", nil)
	h.commitCreatePost(alice, postOwn)
	postBob := h.publish(bob, "from bob (followed)", nil)
	h.commitCreatePost(bob, postBob)
	postCarol := h.publish(carol, "from carol (stranger)", nil)
	h.commitCreatePost(carol, postCarol)
	postDave := h.publish(dave, "from dave", nil)
	h.commitCreatePost(dave, postDave)
	// Alice likes dave's post — so it should NOT appear in her ForYou.
	h.like(alice, postDave.PostID)
	var fy struct {
		Count int              `json:"count"`
		Posts []feedAuthorItem `json:"posts"`
	}
	h.getJSON("/feed/foryou?pub="+alice.PubKeyHex()+"&limit=20", &fy)
	// Expected: only carol's post. The others are excluded.
	// Build a set of returned IDs so the four checks below are order-independent.
	seen := map[string]bool{}
	for _, p := range fy.Posts {
		seen[p.PostID] = true
	}
	if seen[postOwn.PostID] {
		t.Errorf("ForYou included alice's own post %s", postOwn.PostID)
	}
	if seen[postBob.PostID] {
		t.Errorf("ForYou included followed author bob's post %s", postBob.PostID)
	}
	if seen[postDave.PostID] {
		t.Errorf("ForYou included already-liked post from dave %s", postDave.PostID)
	}
	if !seen[postCarol.PostID] {
		t.Errorf("ForYou missing carol's post %s (should surface)", postCarol.PostID)
	}
}
// TestE2EHashtagSearch: the /feed/hashtag/{tag} endpoint returns exactly
// the posts that used the tag — untagged posts never leak in.
func TestE2EHashtagSearch(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	h.fund(alice, 10*blockchain.Token)

	// Three posts: one per tag under test, plus one with no tags at all.
	goPost := h.publish(alice, "learning #golang today", nil)
	h.commitCreatePost(alice, goPost)
	rustPost := h.publish(alice, "later — #rust", nil)
	h.commitCreatePost(alice, rustPost)
	untagged := h.publish(alice, "no tags", nil)
	h.commitCreatePost(alice, untagged)

	var res struct {
		Tag   string           `json:"tag"`
		Count int              `json:"count"`
		Posts []feedAuthorItem `json:"posts"`
	}
	h.getJSON("/feed/hashtag/golang", &res)
	if res.Count != 1 || res.Posts[0].PostID != goPost.PostID {
		t.Errorf("hashtag(golang): got %+v, want [%s]", res, goPost.PostID)
	}
	h.getJSON("/feed/hashtag/rust", &res)
	if res.Count != 1 || res.Posts[0].PostID != rustPost.PostID {
		t.Errorf("hashtag(rust): got %+v, want [%s]", res, rustPost.PostID)
	}
}
// TestE2EScrubberStripsEXIF: uploaded image with EXIF canary comes back
// without the canary in the stored body. Proves server-side scrub is
// mandatory and working at the HTTP boundary.
func TestE2EScrubberStripsEXIF(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	h.fund(alice, 1*blockchain.Token)
	// Build a JPEG with an injected EXIF segment containing a canary.
	// A 16×16 gradient keeps the encode fast while being a real image.
	var jpegBuf bytes.Buffer
	img := image.NewRGBA(image.Rect(0, 0, 16, 16))
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			img.Set(x, y, color.RGBA{uint8(x * 16), uint8(y * 16), 100, 255})
		}
	}
	if err := jpeg.Encode(&jpegBuf, img, &jpeg.Options{Quality: 80}); err != nil {
		t.Fatalf("jpeg encode: %v", err)
	}
	withEXIF := injectEXIFSegment(t, jpegBuf.Bytes(),
		"SUPERSECRETGPS-51.5N-0.1W-iPhone-Serial-A1B2C3")
	// Pre-flight: the upload bytes DO contain the canary. Without this
	// check a broken injector would make the scrub test pass vacuously.
	if !bytes.Contains(withEXIF, []byte("SUPERSECRETGPS")) {
		t.Fatalf("test setup: canary not injected")
	}
	pub := h.publish(alice, "look at this photo", withEXIF)
	if pub.PostID == "" {
		t.Fatalf("publish failed")
	}
	h.commitCreatePost(alice, pub)
	// Fetch the stored body back. The attachment field is the cleaned bytes.
	var fetched struct {
		Attachment string `json:"attachment"` // base64
	}
	h.getJSON("/feed/post/"+pub.PostID, &fetched)
	if fetched.Attachment == "" {
		t.Fatalf("attachment not returned")
	}
	decoded, err := base64.StdEncoding.DecodeString(fetched.Attachment)
	if err != nil {
		t.Fatalf("decode attachment: %v", err)
	}
	if bytes.Contains(decoded, []byte("SUPERSECRETGPS")) {
		t.Errorf("CRITICAL: EXIF canary survived server-side scrub — metadata leaked")
	}
	// Sanity: still a valid JPEG after scrub.
	if _, err := jpeg.Decode(bytes.NewReader(decoded)); err != nil {
		t.Errorf("scrubbed attachment is not a valid JPEG: %v", err)
	}
}
// TestE2ERejectsMIMEMismatch: the server must check magic bytes, not the
// client-claimed MIME type — a PNG labeled image/jpeg gets a 400.
func TestE2ERejectsMIMEMismatch(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	h.fund(alice, 1*blockchain.Token)

	// PNG magic followed by 20 bytes of padding, claimed to be a JPEG.
	fake := append([]byte{0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n'}, make([]byte, 20)...)
	const postID = "mimecheck"
	ts := time.Now().Unix()
	digest := sha256.Sum256(append([]byte("mislabel"), fake...))
	sig := alice.Sign([]byte(fmt.Sprintf("publish:%s:%s:%d",
		postID, hex.EncodeToString(digest[:]), ts)))
	h.postJSONExpectStatus("/feed/publish", feedPublishRequest{
		PostID:         postID,
		Author:         alice.PubKeyHex(),
		Content:        "mislabel",
		AttachmentB64:  base64.StdEncoding.EncodeToString(fake),
		AttachmentMIME: "image/jpeg", // LIE — it's PNG magic
		Sig:            base64.StdEncoding.EncodeToString(sig),
		Ts:             ts,
	}, http.StatusBadRequest)
}
// TestE2ERejectsBadSignature: a signature made with the wrong key cannot
// publish on someone else's behalf — the server answers 403.
func TestE2ERejectsBadSignature(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	eve := h.newUser("eve")
	h.fund(alice, 1*blockchain.Token)
	h.fund(eve, 1*blockchain.Token)

	const postID = "forgery"
	ts := time.Now().Unix()
	digest := sha256.Sum256([]byte("evil"))
	// Eve signs the canonical publish string with HER key while the
	// request body names alice as the author.
	forged := eve.Sign([]byte(fmt.Sprintf("publish:%s:%s:%d",
		postID, hex.EncodeToString(digest[:]), ts)))
	h.postJSONExpectStatus("/feed/publish", feedPublishRequest{
		PostID:  postID,
		Author:  alice.PubKeyHex(), // claim alice
		Content: "evil",
		Sig:     base64.StdEncoding.EncodeToString(forged),
		Ts:      ts,
	}, http.StatusForbidden)
}
// TestE2ERejectsStaleTimestamp: a publish signed an hour in the past is
// outside the replay window and must be rejected with a 400.
func TestE2ERejectsStaleTimestamp(t *testing.T) {
	h := newFeedHarness(t)
	alice := h.newUser("alice")
	h.fund(alice, 1*blockchain.Token)

	const postID = "stale"
	ts := time.Now().Add(-1 * time.Hour).Unix() // 1 hour stale
	digest := sha256.Sum256([]byte("old"))
	sig := alice.Sign([]byte(fmt.Sprintf("publish:%s:%s:%d",
		postID, hex.EncodeToString(digest[:]), ts)))
	h.postJSONExpectStatus("/feed/publish", feedPublishRequest{
		PostID:  postID,
		Author:  alice.PubKeyHex(),
		Content: "old",
		Sig:     base64.StdEncoding.EncodeToString(sig),
		Ts:      ts,
	}, http.StatusBadRequest)
}
// injectEXIFSegment splices an APP1 EXIF segment carrying `canary` right
// after the SOI marker of a JPEG byte stream. Mirrors media/scrub_test.go
// but local to keep the integration test self-contained.
//
// The JPEG APP1 length field is a big-endian uint16 that counts itself
// (2 bytes) plus the payload; an oversized canary previously wrapped the
// length silently and corrupted the stream — now it fails loudly.
func injectEXIFSegment(t *testing.T, src []byte, canary string) []byte {
	t.Helper()
	if len(src) < 2 || src[0] != 0xFF || src[1] != 0xD8 {
		t.Fatalf("not a JPEG")
	}
	payload := []byte("Exif\x00\x00" + canary)
	segLen := len(payload) + 2 // length field counts itself
	if segLen > 0xFFFF {
		t.Fatalf("EXIF canary too long: segment length %d overflows uint16", segLen)
	}
	out := make([]byte, 0, len(src)+segLen+4)
	out = append(out, src[0], src[1])                                 // SOI
	out = append(out, 0xFF, 0xE1, byte(segLen>>8), byte(segLen&0xff)) // APP1 marker + big-endian length
	out = append(out, payload...)
	out = append(out, src[2:]...) // rest of the original stream
	return out
}
// Keep the `strings` and `context` imports referenced so they survive a
// refactor that removes the last direct use — avoids import-block churn
// and unused-import build errors.
var _ = strings.TrimSpace
var _ = context.TODO