Use nostr as a cache system

This commit is contained in:
koalasat 2024-07-01 18:00:17 +02:00 committed by koalasat
parent 8bc7f1d1d1
commit 3e460bb255
No known key found for this signature in database
GPG Key ID: 2F7F61C6146AB157
8 changed files with 236 additions and 3 deletions

View File

@@ -8,7 +8,7 @@ from django.utils import timezone
from api.lightning.node import LNNode
from api.models import Currency, LNPayment, MarketTick, OnchainPayment, Order
-from api.tasks import send_devfund_donation, send_notification
+from api.tasks import send_devfund_donation, send_notification, send_order_nostr_event
from api.utils import get_minning_fee, validate_onchain_address, location_country
from chat.models import Message
@@ -1208,6 +1208,8 @@ class Logics:
        order.save()  # update all fields
        send_order_nostr_event.delay(order_id=order.id, message="new")
        order.log(f"Order({order.id},{str(order)}) is public in the order book")
        return
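The hunk above queues the new Celery task at the moment an order becomes public. For a quick manual check, the same task can be invoked from a Django shell (a hypothetical snippet; the order id 123 is a placeholder for any existing public order):

# python3 manage.py shell
from api.tasks import send_order_nostr_event

# Queue the same "new order" event the publication path above triggers
send_order_nostr_event.delay(order_id=123, message="new")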

47
api/nostr.py Normal file
View File

@@ -0,0 +1,47 @@
import time
import pygeohash
from nostr_sdk import Keys, Client, EventBuilder, NostrSigner, Filter, Kind
from api.models import Order
from decouple import config
class Nostr:
    """Simple nostr events manager to be used as a cache system for clients"""

    async def send_new_order_event(self, order):
        """Creates the event and sends it to the coordinator relay"""
        # Initialize signer keys (note: Keys.generate() creates ephemeral keys,
        # so every call signs with a fresh key; the coordinator's persistent
        # key should be loaded here instead)
        keys = Keys.generate()
        signer = NostrSigner.keys(keys)
        client = Client(signer)

        # Add relays and connect
        await client.add_relays(["ws://localhost:888"])
        await client.connect()

        # Kind 38383 is the NIP-69 peer-to-peer order event; depending on the
        # nostr-sdk version, the tag lists may need wrapping with Tag.parse()
        event = EventBuilder(Kind(38383), "", self.generate_tags(order)).to_event(keys)
        output = await client.send_event(event)
        print(f"Nostr event sent: {output}")
    def generate_tags(self, order):
        # Per NIP-01, tag values must be strings
        return [
            ["d", str(order.id)],
            ["name", order.maker.robot_name],
            ["k", Order.Types(order.type).label.lower()],
            ["f", str(order.currency)],
            ["s", Order.Status(order.status).label],
            ["amt", str(order.last_satoshis)],
            ["fa", str(order.amount)],
            ["pm", *order.payment_method.split(" ")],
            ["premium", str(order.premium_percentile)],
            [
                "source",
                f"{config('HOST_NAME')}/{config('COORDINATOR_ALIAS')}/order/{order.id}",
            ],
            ["expiration", str(int(order.expires_at.timestamp()))],
            ["y", "robosats"],
            ["coordinator", config("COORDINATOR_ALIAS", cast=str)],
            ["z", "order"],
            ["n", str(order.network)],
            ["layer", "lightning"],
            ["g", pygeohash.encode(order.latitude, order.longitude)],
            ["bond", str(order.bond)],
        ]
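Since the relay now acts as a read cache of the order book, clients can query it directly instead of polling the coordinator's API. A minimal consumer sketch, assuming the nostr-sdk 0.32 Python bindings pinned in requirements below and the relay address used above (Filter().kind and get_events_of are my reading of that library's API, not code from this commit):

import asyncio
from datetime import timedelta
from nostr_sdk import Client, Filter, Keys, Kind, NostrSigner

async def fetch_order_book():
    # Any key works for read-only queries; this mirrors the sender's setup
    client = Client(NostrSigner.keys(Keys.generate()))
    await client.add_relays(["ws://localhost:888"])
    await client.connect()
    # Ask the relay for every kind-38383 (NIP-69 order) event it has cached
    order_filter = Filter().kind(Kind(38383))
    events = await client.get_events_of([order_filter], timedelta(seconds=10))
    for event in events:
        print(event.as_json())

asyncio.run(fetch_order_book())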

View File

@@ -1,3 +1,4 @@
import asyncio
from celery import shared_task
from celery.exceptions import SoftTimeLimitExceeded
@@ -251,6 +252,24 @@ def cache_market():
return
@shared_task(name="send_order_nostr_event", ignore_result=True, time_limit=120)
def send_order_nostr_event(order_id=None, message=None):
    if order_id:
        from api.models import Order
        from api.nostr import Nostr

        order = Order.objects.get(id=order_id)
        nostr = Nostr()
        if message == "new":
            coroutine = nostr.send_new_order_event(order)
            if coroutine:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(coroutine)
    return
@shared_task(name="send_notification", ignore_result=True, time_limit=120)
def send_notification(order_id=None, chat_message_id=None, message=None):
    if order_id:

View File

@@ -163,6 +163,7 @@ services:
    ports:
      - 8000:8000 # dev frontend build
      - 12596:12596 # umbrel frontend
      - 888:888 # nostr

  lnd:
    build: ./docker/lnd
@@ -226,6 +227,14 @@ services:
    volumes:
      - ./node/db:/var/lib/postgresql/data

  rnostr:
    build: https://github.com/rnostr/rnostr.git
    container_name: rnostr-dev
    restart: unless-stopped
    volumes:
      - ./nodeapp/rnostr/config/rnostr.toml:/rnostr/config/rnostr.toml:ro
    network_mode: service:tor
# # Postgresql for CLN
# postgres-cln:
# image: postgres:14.2-alpine

View File

@@ -182,7 +182,7 @@ services:
# celery-worker:
# image: backend-image
# pull_policy: never
-# container_name: celery-worker
+# container_name: test-celery-worker
# restart: always
# environment:
# DEVELOPMENT: True
@@ -205,6 +205,15 @@
# - redis
# network_mode: service:bitcoind
  rnostr:
    build: https://github.com/rnostr/rnostr.git
    container_name: test-rnostr
    restart: unless-stopped
    volumes:
      - ./nodeapp/rnostr/config/rnostr.toml:/rnostr/config/rnostr.toml:ro
    network_mode: service:bitcoind
volumes:
  redisdata:
  bitcoin:

145
nodeapp/rnostr/config/rnostr.toml Normal file
View File

@@ -0,0 +1,145 @@
# Configuration
# All duration format reference https://docs.rs/duration-str/latest/duration_str/
#
# config relay information
[information]
name = "rnostr"
description = "A high-performance and scalable nostr relay written in Rust."
software = "https://github.com/rnostr/rnostr"
# pubkey = ""
# contact = ""
# config data path
[data]
# the data path (restart required)
# the events db path is $path/events
path = "./data"
# Query filter timeout time, default no timeout.
db_query_timeout = "100ms"
# config network
[network]
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
host = "127.0.0.1"
# Listen port (restart required)
port = 888
# real ip header (default empty)
# ie: cf-connecting-ip, x-real-ip, x-forwarded-for
# real_ip_header = "x-forwarded-for"
# redirect to other site when user access the http index page
# index_redirect_to = "https://example.com"
# heartbeat timeout (default 120 seconds, must be bigger than heartbeat interval)
# How long before lack of client response causes a timeout
# heartbeat_timeout = "2m"
# heartbeat interval (default 60 seconds)
# How often heartbeat pings are sent
# heartbeat_interval = "1m"
# config thread (restart required)
[thread]
# number of http server threads (restart required)
# default 0 will use the num of cpus
# http = 0
# number of read event threads (restart required)
# default 0 will use the num of cpus
# reader = 0
[limitation]
# this is the maximum number of bytes for incoming JSON. default 512K
max_message_length = 524288
# total number of subscriptions that may be active on a single websocket connection to this relay. default 20
max_subscriptions = 1
# maximum number of filter values in each subscription. default 10
max_filters = 10
# the relay server will clamp each filter's limit value to this number. This means the client won't be able to get more than this number of events from a single subscription filter. default 300
max_limit = 300
# maximum length of subscription id as a string. default 100
max_subid_length = 100
# for authors and ids filters which are to match against a hex prefix, you must provide at least this many hex digits in the prefix. default 10
min_prefix = 10
# in any event, this is the maximum number of elements in the tags list. default 5000
max_event_tags = 15
# Events older than this will be rejected. default 3 years
max_event_time_older_than_now = 94608000
# Events newer than this will be rejected. default 15 minutes
max_event_time_newer_than_now = 900
# Metrics extension, get the metrics data from https://example.com/metrics?auth=auth_key
[metrics]
enabled = false
# change the auth key
auth = "auth_key"
# Auth extension
[auth]
enabled = false
# # Authenticate the command 'REQ' get event, subscribe filter
# [auth.req]
# # only the list IP are allowed to req
# ip_whitelist = ["127.0.0.1"]
# # only the list IP are denied to req
# ip_blacklist = ["127.0.0.1"]
# # Restrict on nip42 verified pubkey, so client needs to implement nip42 and authenticate success
# pubkey_whitelist = ["xxxxxx"]
# pubkey_blacklist = ["xxxx"]
# # Authenticate the command 'EVENT' write event
# [auth.event]
# ip_whitelist = ["127.0.0.1"]
# ip_blacklist = ["127.0.0.1"]
# # Restrict on nip42 verified pubkey, so client needs to implement nip42 and authenticate success
# pubkey_whitelist = ["xxxxxx"]
# pubkey_blacklist = ["xxxx"]
# # Restrict on event author pubkey, No need nip42 authentication
# event_pubkey_whitelist = ["xxxxxx"]
# event_pubkey_blacklist = ["xxxx"]
# IP Rate limiter extension
[rate_limiter]
enabled = false
# # interval at second for clearing invalid data to free up memory.
# # 0 will be converted to default 60 seconds
# clear_interval = "60s"
# # rate limiter ruler list when write event per user client IP
# [[rate_limiter.event]]
# # name of rate limiter, used by metrics
# name = "all"
# # description will notice the user when rate limiter exceeded
# description = "allow only ten events per minute"
# period = "1m"
# limit = 10
# # only limit for kinds
# # support kind list: [1, 2, 3]
# # kind ranges included(start) to excluded(end): [[0, 10000], [30000, 40000]]
# # mixed: [1, 2, [30000, 40000]]
# kinds = [[0, 40000]]
# # skip when ip in whitelist
# ip_whitelist = ["127.0.0.1"]
# [[rate_limiter.event]]
# name = "kind 10000"
# description = "allow only five write events per minute when event kind between 0 to 10000"
# period = "60s"
# limit = 5
# kinds = [[0, 10000]]
# NIP-45 Count extension
# use carefully. see README.md#count
[count]
enabled = false
# NIP-50 Search extension
# use carefully. see README.md#search
[search]
enabled = false
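To verify the relay is reachable on the configured port, its NIP-11 information document (populated from the [information] section above) can be fetched over plain HTTP. A sketch using only the Python standard library; it assumes rnostr serves NIP-11 and that the request is made from inside the container's network namespace:

import json
import urllib.request

# NIP-11: relays return their information document when the client asks
# for the application/nostr+json media type on the HTTP endpoint
req = urllib.request.Request(
    "http://127.0.0.1:888", headers={"Accept": "application/nostr+json"}
)
with urllib.request.urlopen(req) as resp:
    info = json.load(resp)
print(info.get("name"), info.get("software"))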

View File

@@ -28,3 +28,4 @@ drf-spectacular==0.27.2
drf-spectacular-sidecar==2024.7.1
django-cors-headers==4.4.0
base91==1.0.1
nostr-sdk==0.32.2

View File

@@ -5,7 +5,7 @@ from django.urls import reverse
from api.management.commands.clean_orders import Command as CleanOrders
from api.management.commands.follow_invoices import Command as FollowInvoices
from api.models import Order
-from api.tasks import follow_send_payment, send_notification
+from api.tasks import follow_send_payment, send_notification, send_order_nostr_event
from tests.utils.node import (
add_invoice,
create_address,
@@ -156,6 +156,7 @@ class Trade:
        wait_nodes_sync()

    @patch("api.tasks.send_notification.delay", send_notification)
    @patch("api.tasks.send_order_nostr_event.delay", send_order_nostr_event)
    def publish_order(self):
        # Maker's first order fetch. Should trigger maker bond hold invoice generation.
        self.get_order()