Mirror of https://github.com/RoboSats/robosats.git, synced 2024-12-13 19:06:26 +00:00

Merge pull request #1362 from RoboSats/use-nostr-as-cache-system: Use nostr as cache system

Commit ce8799d55f
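This PR makes each coordinator publish its public orders as addressable nostr events (kind 38383) to a bundled strfry relay, so clients and other federation members can read the order book from relays instead of polling the coordinator API. A minimal sketch of how a client might subscribe, speaking the raw NIP-01 wire protocol; the `websockets` package and the local relay URL are assumptions for illustration, not part of this PR:

    import asyncio
    import json

    import websockets  # assumed third-party dependency, not part of this PR

    async def fetch_order_events(relay_url="ws://localhost:7777"):
        """Request all stored kind-38383 order events from a relay."""
        async with websockets.connect(relay_url) as ws:
            await ws.send(json.dumps(["REQ", "orders", {"kinds": [38383]}]))
            while True:
                msg = json.loads(await ws.recv())
                if msg[0] == "EOSE":  # relay has sent all stored events
                    break
                if msg[0] == "EVENT":
                    print(msg[2]["tags"])

    asyncio.run(fetch_order_events())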
@@ -175,3 +175,6 @@ SLASHED_BOND_REWARD_SPLIT = 0.5
 
 # Username for HTLCs escrows
 ESCROW_USERNAME = 'admin'
+
+#Social
+NOSTR_NSEC = 'nsec1vxhs2zc4kqe0dhz4z2gfrdyjsrwf8pg3neeqx6w4nl8djfzdp0dqwd6rxh'
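NOSTR_NSEC is the coordinator's nostr private key; events published to the relay are signed with it, so clients can attribute the order feed to the coordinator. A hedged sketch of deriving the matching public key with the nostr-sdk bindings this PR adds (Keys.parse appears in api/nostr.py below; public_key() and to_bech32() are assumed from the library's standard API):

    from nostr_sdk import Keys

    # Derive the coordinator's public identity from the configured nsec
    keys = Keys.parse("nsec1...")  # the value configured above
    print(keys.public_key().to_bech32())  # npub..., shareable with clients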
@@ -8,7 +8,7 @@ from django.utils import timezone
 
 from api.lightning.node import LNNode
 from api.models import Currency, LNPayment, MarketTick, OnchainPayment, Order
-from api.tasks import send_devfund_donation, send_notification
+from api.tasks import send_devfund_donation, send_notification, nostr_send_order_event
 from api.utils import get_minning_fee, validate_onchain_address, location_country
 from chat.models import Message
@@ -704,9 +704,9 @@ class Logics:
 
         if context["invoice_amount"] < MIN_SWAP_AMOUNT:
             context["swap_allowed"] = False
-            context[
-                "swap_failure_reason"
-            ] = f"Order amount is smaller than the minimum swap available of {MIN_SWAP_AMOUNT} Sats"
+            context["swap_failure_reason"] = (
+                f"Order amount is smaller than the minimum swap available of {MIN_SWAP_AMOUNT} Sats"
+            )
             order.log(
                 f"Onchain payment option was not offered: amount is smaller than the minimum swap available of {MIN_SWAP_AMOUNT} Sats",
                 level="WARN",
@@ -714,9 +714,9 @@ class Logics:
             return True, context
         elif context["invoice_amount"] > MAX_SWAP_AMOUNT:
             context["swap_allowed"] = False
-            context[
-                "swap_failure_reason"
-            ] = f"Order amount is bigger than the maximum swap available of {MAX_SWAP_AMOUNT} Sats"
+            context["swap_failure_reason"] = (
+                f"Order amount is bigger than the maximum swap available of {MAX_SWAP_AMOUNT} Sats"
+            )
             order.log(
                 f"Onchain payment option was not offered: amount is bigger than the maximum swap available of {MAX_SWAP_AMOUNT} Sats",
                 level="WARN",
@@ -741,9 +741,9 @@ class Logics:
             )
         if not valid:
             context["swap_allowed"] = False
-            context[
-                "swap_failure_reason"
-            ] = "Not enough onchain liquidity available to offer a swap"
+            context["swap_failure_reason"] = (
+                "Not enough onchain liquidity available to offer a swap"
+            )
             order.log(
                 "Onchain payment option was not offered: onchain liquidity available to offer a swap",
                 level="WARN",
@@ -1019,6 +1019,8 @@ class Logics:
             order.log("Order expired while waiting for maker bond")
             order.log("Maker bond was cancelled")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 2.a) When maker cancels after bond
@@ -1039,6 +1041,8 @@ class Logics:
             order.log("Order cancelled by maker while public or paused")
             order.log("Maker bond was <b>unlocked</b>")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 2.b) When maker cancels after bond and before taker bond is locked
@@ -1058,6 +1062,8 @@ class Logics:
             order.log("Maker bond was <b>unlocked</b>")
             order.log("Taker bond was <b>cancelled</b>")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 3) When taker cancels before bond
@@ -1070,6 +1076,8 @@ class Logics:
 
             order.log("Taker cancelled before locking the bond")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 4) When taker or maker cancel after bond (before escrow)
@@ -1099,6 +1107,8 @@ class Logics:
             order.log("Maker bond was <b>settled</b>")
             order.log("Taker bond was <b>unlocked</b>")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 4.b) When taker cancel after bond (before escrow)
@@ -1121,6 +1131,8 @@ class Logics:
             order.log("Taker bond was <b>settled</b>")
             order.log("Maker bond was <b>unlocked</b>")
 
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # 5) When trade collateral has been posted (after escrow)
@@ -1136,6 +1148,9 @@ class Logics:
             order.log(
                 f"Taker Robot({user.robot.id},{user.username}) accepted the collaborative cancellation"
             )
+
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # if the taker had asked, and now the maker does: cancel order, return everything
@@ -1144,6 +1159,9 @@ class Logics:
             order.log(
                 f"Maker Robot({user.robot.id},{user.username}) accepted the collaborative cancellation"
             )
+
+            nostr_send_order_event.delay(order_id=order.id)
+
             return True, None
 
         # Otherwise just make true the asked for cancel flags
@@ -1181,6 +1199,8 @@ class Logics:
         order.update_status(Order.Status.CCA)
         send_notification.delay(order_id=order.id, message="collaborative_cancelled")
 
+        nostr_send_order_event.delay(order_id=order.id)
+
         order.log("Order was collaboratively cancelled")
         order.log("Maker bond was <b>unlocked</b>")
         order.log("Taker bond was <b>unlocked</b>")
@@ -1208,6 +1228,8 @@ class Logics:
 
         order.save()  # update all fields
 
+        nostr_send_order_event.delay(order_id=order.id)
+
         order.log(f"Order({order.id},{str(order)}) is public in the order book")
         return
@@ -1350,6 +1372,9 @@ class Logics:
         except Exception:
             pass
         send_notification.delay(order_id=order.id, message="order_taken_confirmed")
+
+        nostr_send_order_event.delay(order_id=order.id)
+
         order.log(
             f"<b>Contract formalized.</b> Maker: Robot({order.maker.robot.id},{order.maker}). Taker: Robot({order.taker.robot.id},{order.taker}). API median price {order.currency.exchange_rate} {dict(Currency.currency_choices)[order.currency.currency]}/BTC. Premium is {order.premium}%. Contract size {order.last_satoshis} Sats"
         )
@@ -1741,11 +1766,15 @@ class Logics:
             order.log(
                 f"Robot({user.robot.id},{user.username}) paused the public order"
             )
+
+            nostr_send_order_event.delay(order_id=order.id)
         elif order.status == Order.Status.PAU:
             order.update_status(Order.Status.PUB)
             order.log(
                 f"Robot({user.robot.id},{user.username}) made public the paused order"
             )
+
+            nostr_send_order_event.delay(order_id=order.id)
         else:
             order.log(
                 f"Robot({user.robot.id},{user.username}) tried to pause/unpause an order that was not public or paused",
api/nostr.py (new file, +98 lines)
@@ -0,0 +1,98 @@
import pygeohash
import hashlib
import uuid

from asgiref.sync import sync_to_async
from nostr_sdk import Keys, Client, EventBuilder, NostrSigner, Kind, Tag
from api.models import Order
from decouple import config


class Nostr:
    """Simple nostr events manager to be used as a cache system for clients"""

    async def send_order_event(self, order):
        """Creates the event and sends it to the coordinator relay"""

        if config("NOSTR_NSEC", cast=str, default="") == "":
            return

        print("Sending nostr event")

        # Initialize with coordinator Keys
        keys = Keys.parse(config("NOSTR_NSEC", cast=str))
        signer = NostrSigner.keys(keys)
        client = Client(signer)

        # Add relays and connect
        await client.add_relays(["ws://localhost:7777"])
        await client.connect()

        robot_name = await self.get_robot_name(order)
        currency = await self.get_robot_currency(order)

        event = EventBuilder(
            Kind(38383), "", self.generate_tags(order, robot_name, currency)
        ).to_event(keys)
        await client.send_event(event)
        print(f"Nostr event sent: {event.as_json()}")

    @sync_to_async
    def get_robot_name(self, order):
        return order.maker.username

    @sync_to_async
    def get_robot_currency(self, order):
        return str(order.currency)

    def generate_tags(self, order, robot_name, currency):
        # Deterministic identifier for the replaceable event, derived from
        # the coordinator alias and the order id
        hashed_id = hashlib.md5(
            f'{config("COORDINATOR_ALIAS", cast=str)}{order.id}'.encode("utf-8")
        ).hexdigest()

        tags = [
            Tag.parse(["d", str(uuid.UUID(hashed_id))]),
            Tag.parse(["name", robot_name]),
            Tag.parse(["k", "sell" if order.type == Order.Types.SELL else "buy"]),
            Tag.parse(["f", currency]),
            Tag.parse(["s", self.get_status_tag(order)]),
            Tag.parse(["amt", "0"]),
            Tag.parse(
                ["fa"]
                + (
                    [str(order.amount)]
                    if not order.has_range
                    else [str(order.min_amount), str(order.max_amount)]
                )
            ),
            Tag.parse(["pm"] + order.payment_method.split(" ")),
            Tag.parse(["premium", str(order.premium)]),
            Tag.parse(
                [
                    "source",
                    f'http://{config("HOST_NAME")}/order/{config("COORDINATOR_ALIAS", cast=str).lower()}/{order.id}',
                ]
            ),
            Tag.parse(["expiration", str(int(order.expires_at.timestamp()))]),
            Tag.parse(["y", "robosats", config("COORDINATOR_ALIAS", cast=str).lower()]),
            Tag.parse(["n", str(config("NETWORK"))]),
            Tag.parse(["layer"] + self.get_layer_tag(order)),
            Tag.parse(["bond", str(order.bond_size)]),
            Tag.parse(["z", "order"]),
        ]

        if order.latitude and order.longitude:
            tags.extend(
                [Tag.parse(["g", pygeohash.encode(order.latitude, order.longitude)])]
            )

        return tags

    def get_status_tag(self, order):
        if order.status == Order.Status.PUB:
            return "pending"
        else:
            return "success"

    def get_layer_tag(self, order):
        if order.type == Order.Types.SELL:
            return ["onchain", "lightning"]
        else:
            return ["lightning"]
api/tasks.py (+15 lines)
@@ -1,3 +1,4 @@
+from asgiref.sync import async_to_sync
 from celery import shared_task
 from celery.exceptions import SoftTimeLimitExceeded
 
@@ -251,6 +252,20 @@ def cache_market():
     return
 
 
+@shared_task(name="nostr_send_order_event", ignore_result=True, time_limit=120)
+def nostr_send_order_event(order_id=None):
+    if order_id:
+        from api.models import Order
+        from api.nostr import Nostr
+
+        order = Order.objects.get(id=order_id)
+
+        nostr = Nostr()
+        async_to_sync(nostr.send_order_event)(order)
+
+    return
+
+
 @shared_task(name="send_notification", ignore_result=True, time_limit=120)
 def send_notification(order_id=None, chat_message_id=None, message=None):
     if order_id:
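Order state transitions enqueue this task with .delay(), as the Logics hunks above show. For debugging without a running broker, Celery can also execute it eagerly in-process; a sketch with a made-up order id:

    # Hypothetical synchronous invocation (the order id is made up)
    from api.tasks import nostr_send_order_event

    nostr_send_order_event.apply(kwargs={"order_id": 1})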
@@ -226,6 +226,16 @@ services:
     volumes:
       - ./node/db:/var/lib/postgresql/data
 
+  strfry:
+    build: ./docker/strfry
+    container_name: strfry-dev
+    restart: unless-stopped
+    volumes:
+      - ./docker/strfry/strfry.conf:/etc/strfry.conf:ro
+      - ./docker/strfry/onion_urls.txt:/app/onion_urls.txt:ro
+      - ./node/strfry/db:/app/strfry-db:rw
+    network_mode: service:tor
+
 # # Postgresql for CLN
 # postgres-cln:
 #   image: postgres:14.2-alpine
@@ -27,6 +27,7 @@ services:
       - "9998:9998"
      - "5432:5432"
       - "6379:6379"
+      - "7777:7777"
     volumes:
       - bitcoin:/bitcoin/.bitcoin/
       - ./tests/bitcoind/entrypoint.sh:/entrypoint.sh
@@ -182,7 +183,7 @@ services:
 # celery-worker:
 #   image: backend-image
 #   pull_policy: never
-#   container_name: celery-worker
+#   container_name: test-celery-worker
 #   restart: always
 #   environment:
 #     DEVELOPMENT: True
docker/strfry/Dockerfile (new file, +41 lines)
@@ -0,0 +1,41 @@
FROM ubuntu:jammy
ENV TZ=Europe/London

RUN apt update && apt install -y --no-install-recommends \
    git g++ make pkg-config libtool ca-certificates \
    libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev \
    libsecp256k1-dev libzstd-dev

# Set up the app
RUN git clone https://github.com/KoalaSat/strfry /app

WORKDIR /app

RUN git submodule update --init
RUN make setup-golpe
RUN make clean
RUN make -j4

RUN apt update && apt install -y --no-install-recommends \
    liblmdb0 libflatbuffers1 libsecp256k1-0 libb2-1 libzstd1 torsocks cron \
    && rm -rf /var/lib/apt/lists/*

RUN echo "TorAddress 127.0.0.1" >> /etc/tor/torsocks.conf
RUN echo "TorPort 9050" >> /etc/tor/torsocks.conf

# Set up crontab
COPY crontab /etc/cron.d/crontab
RUN chmod 0644 /etc/cron.d/crontab
RUN crontab /etc/cron.d/crontab

# Set up entrypoints
COPY sync.sh /etc/strfry/sync.sh
COPY entrypoint.sh /etc/strfry/entrypoint.sh

RUN chmod +x /etc/strfry/entrypoint.sh
RUN chmod +x /etc/strfry/sync.sh

# Set up logs
RUN touch /var/log/cron.log && chmod 0644 /var/log/cron.log

ENTRYPOINT ["/etc/strfry/entrypoint.sh"]
docker/strfry/crontab (new file, +24 lines)
@@ -0,0 +1,24 @@
# Edit this file to introduce tasks to be run by cron.
#
# Each task to run has to be defined through a single line
# indicating with different fields when the task will be run
# and what command to run for the task
#
# To define the time you can provide concrete values for
# minute (m), hour (h), day of month (dom), month (mon),
# and day of week (dow) or use '*' in these fields (for 'any').
#
# Notice that tasks will be started based on the cron's system
# daemon's notion of time and timezones.
#
# Output of the crontab jobs (including errors) is sent through
# email to the user the crontab file belongs to (unless redirected).
#
# For example, you can run a backup of all your user accounts
# at 5 a.m every week with:
# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
#
# For more information see the manual pages of crontab(5) and cron(8)
#
# m h dom mon dow command
*/1 * * * * torsocks /etc/strfry/sync.sh >> /var/log/cron.log 2>&1
docker/strfry/entrypoint.sh (new executable file, +3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh

cron -f -l 8 & tail -f /var/log/cron.log & /app/strfry relay
docker/strfry/onion_urls.txt (new file, +4 lines)
@@ -0,0 +1,4 @@
ws://testraliar7xkhos2gipv2k65obykofb4jqzl5l4danfryacifi4t7qd.onion/nostr
ws://jpp3w5tpxtyg6lifonisdszpriiapszzem4wod2zsdweyfenlsxeoxid.onion/nostr
ws://ghbtv7lhoyhomyir4xvxaeyqgx4ylxksia343jaat3njqqlkqpdjqcyd.onion/nostr
ws://wsjyhbashc4zrrex6vijpryujggbka5plry2o62dxqoz3pxinblnj4ad.onion/nostr
docker/strfry/strfry.conf (new file, +138 lines)
@@ -0,0 +1,138 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/app/strfry-db/"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
    mapsize = 10995116277760

    # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
    noReadAhead = false
}

events {
    # Maximum size of normalised JSON, in bytes
    maxEventSize = 65536

    # Events newer than this will be rejected
    rejectEventsNewerThanSeconds = 900

    # Events older than this will be rejected
    rejectEventsOlderThanSeconds = 94608000

    # Ephemeral events older than this will be rejected
    rejectEphemeralEventsOlderThanSeconds = 60

    # Ephemeral events will be deleted from the DB when older than this
    ephemeralEventsLifetimeSeconds = 300

    # Maximum number of tags allowed
    maxNumTags = 2000

    # Maximum size for tag values, in bytes
    maxTagValSize = 1024
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 7777

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "Robosats"

        # NIP-11: Detailed information about relay, free-form
        description = "Federation cache system."

        # NIP-11: Administrative nostr pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative administrative contact (email, website, etc)
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 3

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = false
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false

        # Log reason for invalid event rejection? Can be disabled to silence excessive logging
        invalidEvents = true
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # negentropy threads: Handle negentropy protocol messages (restart required)
        negentropy = 2
    }

    negentropy {
        # Support negentropy protocol messages
        enabled = true

        # Maximum records that sync will process before returning an error
        maxSyncEvents = 1000000
    }
}
docker/strfry/sync.sh (new executable file, +7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh

filters='{"kinds":[38383]}'

while IFS= read -r line; do
    /app/strfry --config /etc/strfry.conf sync "$line" --filter "$filters" --dir both
done < /app/onion_urls.txt
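The crontab above runs this script through torsocks every minute, so each coordinator's relay negentropy-syncs kind-38383 order events with every other federation relay over Tor. The same loop sketched in Python, assuming the paths baked into this image; the subprocess usage is illustrative only:

    import subprocess

    FILTERS = '{"kinds":[38383]}'

    with open("/app/onion_urls.txt") as f:
        for relay in (line.strip() for line in f if line.strip()):
            # Mirror of sync.sh: bidirectional sync with each federation relay
            subprocess.run(
                ["/app/strfry", "--config", "/etc/strfry.conf",
                 "sync", relay, "--filter", FILTERS, "--dir", "both"],
                check=False,  # keep going if one relay is unreachable
            )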
@@ -37,6 +37,14 @@ location /mainnet/exp/ws/ {
     proxy_set_header Host $host;
 }
 
+location /mainnet/exp/nostr/ {
+    proxy_pass http://mainnet_exp/nostr/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "Upgrade";
+    proxy_set_header Host $host;
+}
+
 # Experimental Coordinator Testnet Locations
 location /test/exp/static/assets/avatars/ {
     proxy_pass http://testnet_exp/static/assets/avatars/;
@@ -37,6 +37,14 @@ location /mainnet/lake/ws/ {
     proxy_set_header Host $host;
 }
 
+location /mainnet/lake/nostr/ {
+    proxy_pass http://mainnet_lake/nostr/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "Upgrade";
+    proxy_set_header Host $host;
+}
+
 # TheBigLake Coordinator Testnet Locations
 location /test/lake/static/assets/avatars/ {
     proxy_pass http://testnet_lake/static/assets/avatars/;
@@ -37,6 +37,14 @@ location /mainnet/satstralia/ws/ {
     proxy_set_header Host $host;
 }
 
+location /mainnet/satstralia/nostr/ {
+    proxy_pass http://mainnet_satstralia/nostr/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "Upgrade";
+    proxy_set_header Host $host;
+}
+
 # Satstralia Coordinator Testnet Locations
 location /test/satstralia/static/assets/avatars/ {
     proxy_pass http://testnet_satstralia/static/assets/avatars/;
@@ -37,6 +37,14 @@ location /mainnet/temple/ws/ {
     proxy_set_header Host $host;
 }
 
+location /mainnet/temple/nostr/ {
+    proxy_pass http://mainnet_temple/nostr/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "Upgrade";
+    proxy_set_header Host $host;
+}
+
 # Temple of Sats Coordinator Testnet Locations
 location /test/temple/static/assets/avatars/ {
     proxy_pass http://testnet_temple/static/assets/avatars/;
@@ -37,6 +37,14 @@ location /mainnet/veneto/ws/ {
     proxy_set_header Host $host;
 }
 
+location /mainnet/veneto/nostr/ {
+    proxy_pass http://mainnet_veneto/nostr/;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "Upgrade";
+    proxy_set_header Host $host;
+}
+
 # BitcoinVeneto Coordinator Testnet Locations
 location /test/veneto/static/assets/avatars/ {
     proxy_pass http://testnet_veneto/static/assets/avatars/;
@@ -64,6 +64,14 @@ http {
        autoindex on;
    }

+   location /nostr {
+       proxy_pass http://127.0.0.1:7777;
+       proxy_http_version 1.1;
+       proxy_set_header Upgrade $http_upgrade;
+       proxy_set_header Connection "Upgrade";
+       proxy_set_header Host $host;
+   }
+
    location = /favicon.ico {
        alias /usr/src/robosats/static/assets/images/favicon-96x96.png;
    }
@@ -28,3 +28,6 @@ drf-spectacular==0.27.2
 drf-spectacular-sidecar==2024.7.1
 django-cors-headers==4.4.0
 base91==1.0.1
+nostr-sdk==0.32.2
+pygeohash==1.2.0
+asgiref==3.8.1