diff --git a/.golangci.yml b/.golangci.yml
index 9b6f6d3db..c89286c6e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,5 @@
run:
- deadline: 10m
+ timeout: 10m
output:
format: github-actions,colored-line-number
diff --git a/api/types/insightapitypes.go b/api/types/insightapitypes.go
index c0cec758d..f7e8b8a03 100644
--- a/api/types/insightapitypes.go
+++ b/api/types/insightapitypes.go
@@ -5,6 +5,7 @@
package types
import (
+ "encoding/hex"
"encoding/json"
"github.com/decred/dcrdata/v8/db/dbtypes"
@@ -80,19 +81,38 @@ type InsightPagination struct {
IsToday string `json:"isToday,omitempty"`
}
+var _ json.Unmarshaler = (*HexBytes)(nil)
+var _ json.Marshaler = HexBytes{}
+
+type HexBytes []byte
+
+func (hb HexBytes) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + hex.EncodeToString(hb) + `"`), nil // json.Marshal(hex.EncodeToString(hb)) but not absurdly inefficient
+}
+
+func (hb *HexBytes) UnmarshalJSON(b []byte) error {
+ var str string
+ err := json.Unmarshal(b, &str)
+ if err != nil {
+ return err
+ }
+ *hb, err = hex.DecodeString(str)
+ return err
+}
+
// AddressTxnOutput models an address transaction outputs.
type AddressTxnOutput struct {
- Address string `json:"address"`
- TxnID string `json:"txid"`
- Vout uint32 `json:"vout"`
- BlockTime int64 `json:"ts,omitempty"`
- ScriptPubKey string `json:"scriptPubKey"`
- Height int64 `json:"height,omitempty"`
- BlockHash string `json:"block_hash,omitempty"`
- Amount float64 `json:"amount,omitempty"`
- Atoms int64 `json:"atoms,omitempty"` // Not Required per Insight spec
- Satoshis int64 `json:"satoshis,omitempty"`
- Confirmations int64 `json:"confirmations"`
+ Address string `json:"address"`
+ TxnID string `json:"txid"`
+ Vout uint32 `json:"vout"`
+ BlockTime int64 `json:"ts,omitempty"`
+ ScriptPubKey HexBytes `json:"scriptPubKey"`
+ Height int64 `json:"height,omitempty"`
+ BlockHash string `json:"block_hash,omitempty"`
+ Amount float64 `json:"amount,omitempty"`
+ Atoms int64 `json:"atoms,omitempty"` // Not Required per Insight spec
+ Satoshis int64 `json:"satoshis,omitempty"`
+ Confirmations int64 `json:"confirmations"`
}
// TxOutFromDB converts a dbtypes.AddressTxnOutput to a api/types.AddressTxnOutput.
diff --git a/api/types/insightapitypes_test.go b/api/types/insightapitypes_test.go
new file mode 100644
index 000000000..ad8d804dd
--- /dev/null
+++ b/api/types/insightapitypes_test.go
@@ -0,0 +1,24 @@
+package types
+
+import (
+	"bytes"
+	"testing"
+)
+
+// TestHexBytes_UnmarshalJSON verifies the JSON round trip of HexBytes:
+// a quoted hex string decodes to the expected bytes, and re-marshaling
+// produces the original quoted string.
+func TestHexBytes_UnmarshalJSON(t *testing.T) {
+	var hb HexBytes
+	if err := hb.UnmarshalJSON([]byte(`"deadbeef"`)); err != nil {
+		t.Fatal(err)
+	}
+	want := HexBytes{0xde, 0xad, 0xbe, 0xef}
+	if !bytes.Equal(hb, want) {
+		t.Fatalf("got %x, want %x", []byte(hb), []byte(want))
+	}
+	out, err := hb.MarshalJSON()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(out) != `"deadbeef"` {
+		t.Fatalf(`got %s, want "deadbeef"`, out)
+	}
+}
diff --git a/cmd/dcrdata/go.mod b/cmd/dcrdata/go.mod
index 0dc165f92..c5835df4f 100644
--- a/cmd/dcrdata/go.mod
+++ b/cmd/dcrdata/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/cmd/dcrdata
-go 1.18
+go 1.21
replace (
github.com/decred/dcrdata/db/dcrpg/v8 => ../../db/dcrpg/
@@ -147,7 +147,7 @@ require (
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
- github.com/lib/pq v1.10.4 // indirect
+ github.com/lib/pq v1.10.9 // indirect
github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf // indirect
github.com/lightninglabs/neutrino v0.14.3-0.20221024182812-792af8548c14 // indirect
github.com/lightningnetwork/lnd/clock v1.0.1 // indirect
diff --git a/cmd/dcrdata/go.sum b/cmd/dcrdata/go.sum
index 204e0fc2e..47376c2f8 100644
--- a/cmd/dcrdata/go.sum
+++ b/cmd/dcrdata/go.sum
@@ -278,6 +278,7 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
+github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -578,6 +579,7 @@ github.com/didip/tollbooth/v6 v6.1.3-0.20220606152938-a7634c70944a h1:YKCmFFZGFW
github.com/didip/tollbooth/v6 v6.1.3-0.20220606152938-a7634c70944a/go.mod h1:wop/gy+XfJK/TXFmmVbFT46jsYOMU15K0Q1lWr4gIW8=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
+github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -690,6 +692,7 @@ github.com/go-chi/render v1.0.1/go.mod h1:pq4Rr7HbnsdaeHagklXub+p6Wd16Af5l9koip1
github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -725,6 +728,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
+github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
@@ -767,6 +771,7 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -848,6 +853,7 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -1132,8 +1138,8 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf h1:HZKvJUHlcXI/f/O0Avg7t8sqkPo78HFzjmeYFl6DPnc=
github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf/go.mod h1:vxmQPeIQxPf6Jf9rM8R+B4rKBqLA2AjttNxkFBL2Plk=
github.com/lightninglabs/neutrino v0.14.2/go.mod h1:OICUeTCn+4Tu27YRJIpWvvqySxx4oH4vgdP33Sw9RDc=
@@ -1535,6 +1541,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
@@ -2225,6 +2232,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180808183934-383e8b2c3b9e/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -2407,6 +2415,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
@@ -2447,7 +2456,9 @@ gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/cmd/dcrdata/internal/api/apiroutes.go b/cmd/dcrdata/internal/api/apiroutes.go
index 5dd518871..5b4be38e1 100644
--- a/cmd/dcrdata/internal/api/apiroutes.go
+++ b/cmd/dcrdata/internal/api/apiroutes.go
@@ -54,7 +54,7 @@ type DataSource interface {
GetBlockHash(idx int64) (string, error)
GetBlockHeight(hash string) (int64, error)
GetBlockByHash(string) (*wire.MsgBlock, error)
- SpendingTransaction(fundingTx string, vout uint32) (string, uint32, int8, error)
+ SpendingTransaction(fundingTx string, vout uint32) (string, uint32, error)
SpendingTransactions(fundingTxID string) ([]string, []uint32, []uint32, error)
AddressHistory(address string, N, offset int64, txnType dbtypes.AddrTxnViewType) ([]*dbtypes.AddressRow, *dbtypes.AddressBalance, error)
FillAddressTransactions(addrInfo *dbtypes.AddressInfo) error
@@ -80,7 +80,7 @@ type DataSource interface {
GetStakeInfoExtendedByHash(hash string) *apitypes.StakeInfoExtended
GetStakeInfoExtendedByHeight(idx int) *apitypes.StakeInfoExtended
GetPoolInfo(idx int) *apitypes.TicketPoolInfo
- GetPoolInfoByHash(hash string) *apitypes.TicketPoolInfo
+ // GetPoolInfoByHash(hash string) *apitypes.TicketPoolInfo
GetPoolInfoRange(idx0, idx1 int) []apitypes.TicketPoolInfo
GetPoolValAndSizeRange(idx0, idx1 int) ([]float64, []uint32)
GetPool(idx int64) ([]string, error)
@@ -1667,6 +1667,11 @@ func (c *appContext) addressIoCsv(crlf bool, w http.ResponseWriter, r *http.Requ
strDirection = "-1"
}
+ var matchingTx string
+ if r.MatchingTxHash != nil {
+ matchingTx = r.MatchingTxHash.String()
+ }
+
err = writer.Write([]string{
r.TxHash.String(),
strDirection,
@@ -1675,7 +1680,7 @@ func (c *appContext) addressIoCsv(crlf bool, w http.ResponseWriter, r *http.Requ
strconv.FormatFloat(dcrutil.Amount(r.Value).ToCoin(), 'f', -1, 64),
strconv.FormatInt(r.TxBlockTime, 10),
txhelpers.TxTypeToString(int(r.TxType)),
- r.MatchingTxHash.String(),
+ matchingTx,
})
if err != nil {
return // too late to write an error code
diff --git a/cmd/dcrdata/internal/api/insight/apiroutes.go b/cmd/dcrdata/internal/api/insight/apiroutes.go
index 92a85215a..309446019 100644
--- a/cmd/dcrdata/internal/api/insight/apiroutes.go
+++ b/cmd/dcrdata/internal/api/insight/apiroutes.go
@@ -33,7 +33,7 @@ import (
type BlockDataSource interface {
AddressBalance(address string) (bal *dbtypes.AddressBalance, cacheUpdated bool, err error)
- AddressIDsByOutpoint(txHash string, voutIndex uint32) ([]uint64, []string, int64, error)
+ OutpointAddresses(txHash string, voutIndex uint32) ([]string, int64, error)
AddressUTXO(address string) ([]*dbtypes.AddressTxnOutput, bool, error)
BlockSummaryTimeRange(min, max int64, limit int) ([]dbtypes.BlockDataBasic, error)
GetBlockHash(idx int64) (string, error)
@@ -130,7 +130,7 @@ func writeJSON(w http.ResponseWriter, thing interface{}, indent string) {
func writeInsightError(w http.ResponseWriter, str string) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(http.StatusBadRequest)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
// Insight API response for an item NOT FOUND. This means the request was valid
@@ -140,7 +140,7 @@ func writeInsightError(w http.ResponseWriter, str string) {
func writeInsightNotFound(w http.ResponseWriter, str string) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(http.StatusNotFound)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
func (iapi *InsightApi) getTransaction(w http.ResponseWriter, r *http.Request) {
@@ -459,7 +459,7 @@ func (iapi *InsightApi) getAddressesTxnOutput(w http.ResponseWriter, r *http.Req
// need to do one more search on utxo and do not add if this is
// already in the list as a confirmed tx.
for _, utxo := range confirmedTxnOutputs {
- if utxo.Vout == f.Index && utxo.TxHash == f.Hash {
+ if utxo.Vout == f.Index && utxo.TxHash == dbtypes.ChainHash(f.Hash) {
continue FUNDING_TX_DUPLICATE_CHECK
}
}
@@ -473,7 +473,7 @@ func (iapi *InsightApi) getAddressesTxnOutput(w http.ResponseWriter, r *http.Req
TxnID: fundingTx.Hash().String(),
Vout: f.Index,
BlockTime: fundingTx.MemPoolTime,
- ScriptPubKey: hex.EncodeToString(txOut.PkScript),
+ ScriptPubKey: txOut.PkScript,
Amount: dcrutil.Amount(txOut.Value).ToCoin(),
Satoshis: txOut.Value,
Confirmations: 0,
diff --git a/cmd/dcrdata/internal/api/insight/converter.go b/cmd/dcrdata/internal/api/insight/converter.go
index 94455c7c0..acfe48309 100644
--- a/cmd/dcrdata/internal/api/insight/converter.go
+++ b/cmd/dcrdata/internal/api/insight/converter.go
@@ -79,7 +79,7 @@ func (iapi *InsightApi) DcrToInsightTxns(txs []*chainjson.TxRawResult, noAsm, no
// work if the funding transaction is confirmed. Otherwise use RPC
// to get the funding transaction outpoint addresses.
if !vinGenerated {
- _, addresses, _, err := iapi.BlockData.AddressIDsByOutpoint(vin.Txid, vin.Vout)
+ addresses, _, err := iapi.BlockData.OutpointAddresses(vin.Txid, vin.Vout)
if err == nil && len(addresses) > 0 {
InsightVin.Addr = addresses[0]
} else {
diff --git a/cmd/dcrdata/internal/api/insight/socket.io.go b/cmd/dcrdata/internal/api/insight/socket.io.go
index 861d920eb..4a87715e8 100644
--- a/cmd/dcrdata/internal/api/insight/socket.io.go
+++ b/cmd/dcrdata/internal/api/insight/socket.io.go
@@ -207,7 +207,9 @@ func NewSocketServer(params *chaincfg.Params, txGetter txhelpers.RawTransactionG
apiLog.Infof("Started Insight socket.io server.")
- go server.Serve()
+ go func() {
+ apiLog.Warnf("SocketServer.Serve: %v", server.Serve())
+ }()
return server, nil
}
diff --git a/cmd/dcrdata/internal/explorer/explorer.go b/cmd/dcrdata/internal/explorer/explorer.go
index bfbbaf9e8..2e007f90f 100644
--- a/cmd/dcrdata/internal/explorer/explorer.go
+++ b/cmd/dcrdata/internal/explorer/explorer.go
@@ -70,7 +70,7 @@ type explorerDataSource interface {
Height() int64
HeightDB() (int64, error)
BlockHash(height int64) (string, error)
- SpendingTransaction(fundingTx string, vout uint32) (string, uint32, int8, error)
+ SpendingTransaction(fundingTx string, vout uint32) (string, uint32, error)
SpendingTransactions(fundingTxID string) ([]string, []uint32, []uint32, error)
PoolStatusForTicket(txid string) (dbtypes.TicketSpendType, dbtypes.TicketPoolStatus, error)
TreasuryBalance() (*dbtypes.TreasuryBalance, error)
@@ -89,7 +89,7 @@ type explorerDataSource interface {
TicketPoolVisualization(interval dbtypes.TimeBasedGrouping) (*dbtypes.PoolTicketsData, *dbtypes.PoolTicketsData, *dbtypes.PoolTicketsData, int64, error)
TransactionBlocks(hash string) ([]*dbtypes.BlockStatus, []uint32, error)
Transaction(txHash string) ([]*dbtypes.Tx, error)
- VinsForTx(*dbtypes.Tx) (vins []dbtypes.VinTxProperty, prevPkScripts []string, scriptVersions []uint16, err error)
+ VinsForTx(*dbtypes.Tx) (vins []dbtypes.VinTxProperty, err error)
VoutsForTx(*dbtypes.Tx) ([]dbtypes.Vout, error)
PosIntervals(limit, offset uint64) ([]*dbtypes.BlocksGroupedInfo, error)
TimeBasedIntervals(timeGrouping dbtypes.TimeBasedGrouping, limit, offset uint64) ([]*dbtypes.BlocksGroupedInfo, error)
diff --git a/cmd/dcrdata/internal/explorer/explorerroutes.go b/cmd/dcrdata/internal/explorer/explorerroutes.go
index 2cf68169c..06515c000 100644
--- a/cmd/dcrdata/internal/explorer/explorerroutes.go
+++ b/cmd/dcrdata/internal/explorer/explorerroutes.go
@@ -6,7 +6,6 @@ package explorer
import (
"context"
- "encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -23,11 +22,8 @@ import (
"github.com/decred/dcrd/chaincfg/v3"
"github.com/decred/dcrd/dcrutil/v4"
- chainjson "github.com/decred/dcrd/rpc/jsonrpc/types/v4"
- "github.com/decred/dcrd/txscript/v4"
"github.com/decred/dcrd/txscript/v4/stdaddr"
"github.com/decred/dcrd/txscript/v4/stdscript"
- "github.com/decred/dcrd/wire"
"github.com/decred/dcrdata/exchanges/v3"
"github.com/decred/dcrdata/gov/v6/agendas"
@@ -37,7 +33,6 @@ import (
"github.com/decred/dcrdata/v8/txhelpers"
ticketvotev1 "github.com/decred/politeia/politeiawww/api/ticketvote/v1"
- humanize "github.com/dustin/go-humanize"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
@@ -244,7 +239,7 @@ func (exp *explorerUI) Home(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
// SideChains is the page handler for the "/side" path.
@@ -275,7 +270,7 @@ func (exp *explorerUI) SideChains(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
// InsightRootPage is the page for the "/insight" path.
@@ -293,7 +288,7 @@ func (exp *explorerUI) InsightRootPage(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
// DisapprovedBlocks is the page handler for the "/disapproved" path.
@@ -324,7 +319,7 @@ func (exp *explorerUI) DisapprovedBlocks(w http.ResponseWriter, r *http.Request)
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
- io.WriteString(w, str)
+ io.WriteString(w, str) //nolint:errcheck
}
// VisualBlocks is the page handler for the "/visualblocks" path.
@@ -746,7 +741,7 @@ func (exp *explorerUI) Block(w http.ResponseWriter, r *http.Request) {
log.Warnf("Unable to retrieve chain status for block %s: %v", hash, err)
}
for i, block := range altBlocks {
- if block.Hash == hash {
+ if block.Hash.String() == hash {
data.Valid = block.IsValid
data.MainChain = block.IsMainchain
altBlocks = append(altBlocks[:i], altBlocks[i+1:]...)
@@ -852,8 +847,16 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
tx := exp.dataSource.GetExplorerTx(hash)
// If dcrd has no information about the transaction, pull the transaction
- // details from the auxiliary DB database.
+	// details from the auxiliary database. Several pieces of information may
+	// be missing, namely the pkScripts and related details, including the
+ // prevout addresses.
if tx == nil {
+ exp.StatusPage(w, defaultErrorCode, "could not find that transaction",
+ "", ExpStatusNotFound)
+ return
+
+ /* maybe some day, but this is pointless and costly for now
+
log.Warnf("No transaction information for %v. Trying tables in case this is an orphaned txn.", hash)
// Search for occurrences of the transaction in the database.
dbTxs, err := exp.dataSource.Transaction(hash)
@@ -897,9 +900,9 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
// Vouts - looked-up in vouts table
BlockHeight: dbTx0.BlockHeight,
BlockIndex: dbTx0.BlockIndex,
- BlockHash: dbTx0.BlockHash,
+ BlockHash: dbTx0.BlockHash.String(),
Confirmations: exp.Height() - dbTx0.BlockHeight + 1,
- Time: types.TimeDef(dbTx0.Time),
+ Time: types.TimeDef(dbTx0.BlockTime),
}
// Coinbase transactions are regular, but call them coinbase for the page.
@@ -924,17 +927,17 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
// Convert to explorer.Vout, getting spending information from DB.
for iv := range vouts {
// Check pkScript for OP_RETURN and OP_TADD.
- pkScript := vouts[iv].ScriptPubKey
- opTAdd := len(pkScript) > 0 && pkScript[0] == txscript.OP_TADD
- var opReturn string
- if !opTAdd {
- asm, _ := txscript.DisasmString(pkScript)
- if strings.HasPrefix(asm, "OP_RETURN") {
- opReturn = asm
- }
- }
+ // pkScript := vouts[iv].ScriptPubKey
+ // opTAdd := len(pkScript) > 0 && pkScript[0] == txscript.OP_TADD
+ // var opReturn string
+ // if !opTAdd {
+ // asm, _ := txscript.DisasmString(pkScript)
+ // if strings.HasPrefix(asm, "OP_RETURN") {
+ // opReturn = asm
+ // }
+ // }
// Determine if the outpoint is spent
- spendingTx, _, _, err := exp.dataSource.SpendingTransaction(hash, vouts[iv].TxIndex)
+ spendingTx, _, err := exp.dataSource.SpendingTransaction(hash, vouts[iv].TxIndex)
if exp.timeoutErrorPage(w, err, "SpendingTransaction") {
return
}
@@ -949,15 +952,15 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
FormattedAmount: humanize.Commaf(amount),
Type: vouts[iv].ScriptPubKeyData.Type.String(),
Spent: spendingTx != "",
- OP_RETURN: opReturn,
- OP_TADD: opTAdd,
- Index: vouts[iv].TxIndex,
- Version: vouts[iv].Version,
+ // OP_RETURN: opReturn,
+ // OP_TADD: opTAdd,
+ Index: vouts[iv].TxIndex,
+ Version: vouts[iv].Version,
})
}
// Retrieve vins from DB.
- vins, prevPkScripts, scriptVersions, err := exp.dataSource.VinsForTx(dbTx0)
+ vins, err := exp.dataSource.VinsForTx(dbTx0)
if exp.timeoutErrorPage(w, err, "VinsForTx") {
return
}
@@ -971,15 +974,15 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
// Convert to explorer.Vin from dbtypes.VinTxProperty.
for iv := range vins {
// Decode all addresses from previous outpoint's pkScript.
- var addresses []string
- pkScriptsStr, err := hex.DecodeString(prevPkScripts[iv])
- if err != nil {
- log.Errorf("Failed to decode pkScript: %v", err)
- }
- _, scrAddrs := stdscript.ExtractAddrs(scriptVersions[iv], pkScriptsStr, exp.ChainParams)
- for ia := range scrAddrs {
- addresses = append(addresses, scrAddrs[ia].String())
- }
+ // var addresses []string
+ // pkScriptsStr, err := hex.DecodeString(prevPkScripts[iv])
+ // if err != nil {
+ // log.Errorf("Failed to decode pkScript: %v", err)
+ // }
+ // _, scrAddrs := stdscript.ExtractAddrs(scriptVersions[iv], pkScriptsStr, exp.ChainParams)
+ // for ia := range scrAddrs {
+ // addresses = append(addresses, scrAddrs[ia].String())
+ // }
// If the scriptsig does not decode or disassemble, oh well.
asm, _ := txscript.DisasmString(vins[iv].ScriptSig)
@@ -1017,7 +1020,7 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
Hex: hex.EncodeToString(vins[iv].ScriptSig),
},
},
- Addresses: addresses,
+ Addresses: []string{"unknown"}, // addresses,
FormattedAmount: humanize.Commaf(amount),
Index: txIndex,
})
@@ -1049,9 +1052,10 @@ func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
tx.Mature = "True"
}
}
+ */
} // tx == nil (not found by dcrd)
- // Check for any transaction outputs that appear unspent.
+ // Check for any transaction outputs that *appear* unspent.
unspents := types.UnspentOutputIndices(tx.Vout)
if len(unspents) > 0 {
// Grab the mempool transaction inputs that match this transaction.
@@ -1976,7 +1980,8 @@ func (exp *explorerUI) Search(w http.ResponseWriter, r *http.Request) {
}
// If it is not a valid hash, try proposals and give up.
- if _, err = chainhash.NewHashFromStr(searchStrSplit[0]); err != nil {
+ hash, err := chainhash.NewHashFromStr(searchStrSplit[0])
+ if err != nil {
if tryProp() {
return
}
@@ -1985,6 +1990,10 @@ func (exp *explorerUI) Search(w http.ResponseWriter, r *http.Request) {
"", ExpStatusNotFound)
return
}
+ hashStr := hash.String()
+ if utxoLike {
+ searchStrRewritten = hashStr + "/out/" + searchStrSplit[1]
+ }
// A valid hash could be block, txid, or prop. First try blocks, then tx via
// getrawtransaction, then props, then tx via DB query.
@@ -1992,21 +2001,20 @@ func (exp *explorerUI) Search(w http.ResponseWriter, r *http.Request) {
if !utxoLike {
// Attempt to get a block index by calling GetBlockHeight to see if the
// value is a block hash and then redirect to the block page if it is.
- _, err = exp.dataSource.GetBlockHeight(searchStrSplit[0])
+ _, err = exp.dataSource.GetBlockHeight(hashStr)
if err == nil {
- http.Redirect(w, r, "/block/"+searchStrSplit[0], http.StatusPermanentRedirect)
+ http.Redirect(w, r, "/block/"+hashStr, http.StatusPermanentRedirect)
return
}
}
// It's unlikely to be a tx id with many leading/trailing zeros.
- trimmedZeros := 2*chainhash.HashSize - len(strings.Trim(searchStrSplit[0], "0"))
+ trimmedZeros := 2*chainhash.HashSize - len(strings.Trim(hashStr, "0"))
- // Call GetExplorerTx to see if the value is a transaction hash and then
- // redirect to the tx page if it is.
+ // See if it's a transaction and then redirect to the tx page if it is.
if trimmedZeros < 10 {
- tx := exp.dataSource.GetExplorerTx(searchStrSplit[0])
- if tx != nil {
+ _, err = exp.dataSource.GetTransactionByHash(hashStr)
+ if err == nil {
http.Redirect(w, r, "/tx/"+searchStrRewritten, http.StatusPermanentRedirect)
return
}
@@ -2018,16 +2026,16 @@ func (exp *explorerUI) Search(w http.ResponseWriter, r *http.Request) {
}
// Also check the DB as it may have transactions from orphaned blocks.
- if trimmedZeros < 10 {
- dbTxs, err := exp.dataSource.Transaction(searchStrSplit[0])
- if err != nil && !errors.Is(err, dbtypes.ErrNoResult) {
- log.Errorf("Searching for transaction failed: %v", err)
- }
- if dbTxs != nil {
- http.Redirect(w, r, "/tx/"+searchStrRewritten, http.StatusPermanentRedirect)
- return
- }
- }
+ // if trimmedZeros < 10 {
+ // dbTxs, err := exp.dataSource.Transaction(searchStrSplit[0])
+ // if err != nil && !errors.Is(err, dbtypes.ErrNoResult) {
+ // log.Errorf("Searching for transaction failed: %v", err)
+ // }
+ // if dbTxs != nil {
+ // http.Redirect(w, r, "/tx/"+searchStrRewritten, http.StatusPermanentRedirect)
+ // return
+ // }
+ // }
message := "The search did not find any matching address, block, transaction or proposal token: " + searchStr
exp.StatusPage(w, "search failed", message, "", ExpStatusNotFound)
diff --git a/cmd/dcrdata/internal/explorer/templates.go b/cmd/dcrdata/internal/explorer/templates.go
index 562a919aa..bc412ed7e 100644
--- a/cmd/dcrdata/internal/explorer/templates.go
+++ b/cmd/dcrdata/internal/explorer/templates.go
@@ -564,24 +564,45 @@ func makeTemplateFuncMap(params *chaincfg.Params) template.FuncMap {
}
return arr
},
- "hashlink": func(hash string, link string) [2]string {
- return [2]string{hash, link}
- },
- "hashStart": func(hash string) string {
+ "hashlink": func(hash any, link string) [2]string {
+ var h string
+ switch ht := hash.(type) {
+ case string:
+ h = ht
+ case fmt.Stringer: // e.g. dbtypes.ChainHash
+ h = ht.String()
+ }
+ return [2]string{h, link}
+ },
+ "hashStart": func(hash any) string {
+ var h string
+ switch ht := hash.(type) {
+ case string:
+ h = ht
+ case fmt.Stringer: // e.g. dbtypes.ChainHash
+ h = ht.String()
+ }
clipLen := 6
- hashLen := len(hash) - clipLen
+ hashLen := len(h) - clipLen
if hashLen < 1 {
return ""
}
- return hash[0:hashLen]
- },
- "hashEnd": func(hash string) string {
+ return h[0:hashLen]
+ },
+ "hashEnd": func(hash any) string {
+ var h string
+ switch ht := hash.(type) {
+ case string:
+ h = ht
+ case fmt.Stringer: // e.g. dbtypes.ChainHash
+ h = ht.String()
+ }
clipLen := 6
- hashLen := len(hash) - clipLen
+ hashLen := len(h) - clipLen
if hashLen < 0 {
- return hash
+ return h
}
- return hash[hashLen:]
+ return h[hashLen:]
},
"redirectToMainnet": func(netName string, message string) bool {
if netName != "Mainnet" && strings.Contains(message, "mainnet") {
diff --git a/cmd/dcrdata/internal/explorer/templates_test.go b/cmd/dcrdata/internal/explorer/templates_test.go
index 3ef05ce76..506722538 100644
--- a/cmd/dcrdata/internal/explorer/templates_test.go
+++ b/cmd/dcrdata/internal/explorer/templates_test.go
@@ -91,9 +91,9 @@ func TestHashStart(t *testing.T) {
t.Fatalf(`Template function map does not contain "hashStart".`)
}
- hashStartFn, ok := hashStart.(func(hash string) string)
+ hashStartFn, ok := hashStart.(func(hash any) string)
if !ok {
- t.Fatalf(`Template function "hashStart" is not of type "func(hash string) string".`)
+		t.Fatalf(`Template function "hashStart" is not of type "func(hash any) string", is %T.`, hashStart)
}
testData := []struct {
@@ -125,9 +125,9 @@ func TestHashEnd(t *testing.T) {
t.Fatalf(`Template function map does not contain "hashEnd".`)
}
- hashEndFn, ok := hashEnd.(func(hash string) string)
+ hashEndFn, ok := hashEnd.(func(hash any) string)
if !ok {
- t.Fatalf(`Template function "hashEnd" is not of type "func(hash string) string".`)
+ t.Fatalf(`Template function "hashEnd" is not of type "func(hash any) string", is %T.`, hashEnd)
}
testData := []struct {
@@ -159,7 +159,7 @@ func TestHashStartEnd(t *testing.T) {
t.Fatalf(`Template function map does not contain "hashStart".`)
}
- hashStartFn, ok := hashStart.(func(hash string) string)
+ hashStartFn, ok := hashStart.(func(hash any) string)
if !ok {
t.Fatalf(`Template function "hashStart" is not of type "func(hash string) string".`)
}
@@ -169,7 +169,7 @@ func TestHashStartEnd(t *testing.T) {
t.Fatalf(`Template function map does not contain "hashEnd".`)
}
- hashEndFn, ok := hashEnd.(func(hash string) string)
+ hashEndFn, ok := hashEnd.(func(hash any) string)
if !ok {
t.Fatalf(`Template function "hashEnd" is not of type "func(hash string) string".`)
}
diff --git a/cmd/dcrdata/internal/middleware/apimiddleware.go b/cmd/dcrdata/internal/middleware/apimiddleware.go
index 49fbedb18..35b716d9d 100644
--- a/cmd/dcrdata/internal/middleware/apimiddleware.go
+++ b/cmd/dcrdata/internal/middleware/apimiddleware.go
@@ -815,7 +815,7 @@ func ChartGroupingCtx(next http.Handler) http.Handler {
// apiDocs generates a middleware with a "docs" in the context containing a map
// of the routers handlers, etc.
-func apiDocs(mux *chi.Mux) func(next http.Handler) http.Handler {
+func apiDocs(mux *chi.Mux) func(next http.Handler) http.Handler { //nolint
var buf bytes.Buffer
err := json.Indent(&buf, []byte(docgen.JSONRoutesDoc(mux)), "", "\t")
if err != nil {
diff --git a/cmd/dcrdata/main.go b/cmd/dcrdata/main.go
index bfc68f06a..2a9f1d322 100644
--- a/cmd/dcrdata/main.go
+++ b/cmd/dcrdata/main.go
@@ -886,25 +886,18 @@ func _main(ctx context.Context) error {
sideChainsStored++
// Collect and store data for each block in this side chain.
- for _, hash := range sideChain.Hashes {
- // Validate the block hash.
- blockHash, err := chainhash.NewHashFromStr(hash)
- if err != nil {
- log.Errorf("Invalid block hash %s: %v.", hash, err)
- continue
- }
-
+ for _, blockHash := range sideChain.Hashes {
// Collect block data.
- _, msgBlock, err := collector.CollectHash(blockHash)
+ _, msgBlock, err := collector.CollectHash(&blockHash)
if err != nil {
// Do not quit if unable to collect side chain block data.
log.Errorf("Unable to collect data for side chain block %s: %v.",
- hash, err)
+ blockHash, err)
continue
}
// Get the chainwork
- chainWork, err := rpcutils.GetChainWork(chainDB.Client, blockHash)
+ chainWork, err := rpcutils.GetChainWork(chainDB.Client, &blockHash)
if err != nil {
log.Errorf("GetChainWork failed (%s): %v", blockHash, err)
continue
diff --git a/cmd/dcrdata/views/extras.tmpl b/cmd/dcrdata/views/extras.tmpl
index b18893172..2be7a64b9 100644
--- a/cmd/dcrdata/views/extras.tmpl
+++ b/cmd/dcrdata/views/extras.tmpl
@@ -304,7 +304,7 @@
{{template "decimalParts" (float64AsDecimalParts .ReceivedTotal 8 false)}} |
{{- else if or (eq $txType "credit") .IsFunding}}{{/* .IsFunding = true && txType = "all" is a credit */}}
{{template "decimalParts" (float64AsDecimalParts .ReceivedTotal 8 false)}} |
- {{- if ne .MatchedTx ""}}
+ {{- if .MatchedTx}}
sstxcommitment |
- {{- else if ne .MatchedTx ""}}
+ {{- else if .MatchedTx }}
source |
{{- else}}
N/A |
diff --git a/cmd/swapscan-btc/go.mod b/cmd/swapscan-btc/go.mod
index 115304e7e..ad7efea4d 100644
--- a/cmd/swapscan-btc/go.mod
+++ b/cmd/swapscan-btc/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/cmd/swapscan-btc
-go 1.18
+go 1.21
require (
github.com/btcsuite/btcd v0.23.3
diff --git a/db/cache/addresscache.go b/db/cache/addresscache.go
index 08d4f3521..cbf7dd5ef 100644
--- a/db/cache/addresscache.go
+++ b/db/cache/addresscache.go
@@ -16,7 +16,6 @@ import (
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrdata/v8/db/dbtypes"
- "github.com/decred/dcrdata/v8/txhelpers"
)
const (
@@ -96,7 +95,7 @@ func CountCreditDebitRowsCompact(rows []*dbtypes.AddressRowCompact) (numCredit,
// in a []dbtypes.AddressRowCompact.
func CountUnspentCreditRowsCompact(rows []*dbtypes.AddressRowCompact) (numCredit int) {
for _, row := range rows {
- if row.IsFunding && txhelpers.IsZeroHash(row.MatchingTxHash) {
+ if row.IsFunding && row.MatchingTxHash.IsZero() {
numCredit++
}
}
@@ -324,7 +323,7 @@ func unspentCreditAddressRows(rows []*dbtypes.AddressRowCompact, N, offset int)
var skipped int
out := make([]*dbtypes.AddressRowCompact, 0, N)
for _, row := range rows {
- if !row.IsFunding || !txhelpers.IsZeroHash(row.MatchingTxHash) {
+ if !row.IsFunding || !row.MatchingTxHash.IsZero() {
continue
}
@@ -725,7 +724,7 @@ func NewAddressCache(rowCapacity, addressCapacity, utxoCapacityBytes int) *Addre
}
log.Debugf("Allowing %d cached UTXOs per address (max %d addresses), using ~%.0f MiB.",
ac.maxUTXOsPerAddr, addressCapacity, float64(utxoCapacityBytes)/1024/1024)
- defer func() { go ac.Reporter() }()
+ go ac.Reporter()
return ac
}
diff --git a/db/cache/addresscache_test.go b/db/cache/addresscache_test.go
index 8e9da4091..d7bca673a 100644
--- a/db/cache/addresscache_test.go
+++ b/db/cache/addresscache_test.go
@@ -97,7 +97,7 @@ func TestAddressCacheItem_Transactions(t *testing.T) {
aci.rows = []*dbtypes.AddressRowCompact{
{
Address: "Dsnieug5H7Zn3SjUWwbcZ17ox9d3F2TEvZV",
- TxHash: *txHash,
+ TxHash: dbtypes.ChainHash(*txHash),
Value: 121,
},
}
diff --git a/db/cache/charts.go b/db/cache/charts.go
index cbc1d65bc..1b29dafe2 100644
--- a/db/cache/charts.go
+++ b/db/cache/charts.go
@@ -886,7 +886,7 @@ func (charts *ChartData) Update() error {
if err != nil {
return err
}
- log.Tracef(" - Chart updater %q completed in %f seconds.",
+ log.Debugf(" - Chart updater %q completed in %f seconds.",
updater.Tag, time.Since(ti).Seconds())
}
diff --git a/db/dbtypes/conversion.go b/db/dbtypes/conversion.go
index 07ef72b59..bf792bd23 100644
--- a/db/dbtypes/conversion.go
+++ b/db/dbtypes/conversion.go
@@ -11,33 +11,20 @@ import (
)
// MsgBlockToDBBlock creates a dbtypes.Block from a wire.MsgBlock
-func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params, chainWork string, winners []string) *Block {
+func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params, chainWork string, winners []ChainHash) *Block {
// Create the dbtypes.Block structure
blockHeader := msgBlock.Header
- // convert each transaction hash to a hex string
- txHashStrs := make([]string, 0, len(msgBlock.Transactions))
- for _, tx := range msgBlock.Transactions {
- txHashStrs = append(txHashStrs, tx.CachedTxHash().String())
- }
-
- stxHashStrs := make([]string, 0, len(msgBlock.STransactions))
- for _, tx := range msgBlock.STransactions {
- stxHashStrs = append(stxHashStrs, tx.CachedTxHash().String())
- }
-
// Assemble the block
return &Block{
- Hash: blockHeader.BlockHash().String(),
+ Hash: ChainHash(blockHeader.BlockHash()),
Size: uint32(msgBlock.SerializeSize()),
Height: blockHeader.Height,
Version: uint32(blockHeader.Version),
NumTx: uint32(len(msgBlock.Transactions) + len(msgBlock.STransactions)),
// nil []int64 for TxDbIDs
NumRegTx: uint32(len(msgBlock.Transactions)),
- Tx: txHashStrs,
NumStakeTx: uint32(len(msgBlock.STransactions)),
- STx: stxHashStrs,
Time: NewTimeDef(blockHeader.Timestamp),
Nonce: uint64(blockHeader.Nonce),
VoteBits: blockHeader.VoteBits,
@@ -49,7 +36,7 @@ func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params, ch
SBits: uint64(blockHeader.SBits),
Difficulty: txhelpers.GetDifficultyRatio(blockHeader.Bits, chainParams),
StakeVersion: blockHeader.StakeVersion,
- PreviousHash: blockHeader.PrevBlock.String(),
+ PreviousHash: ChainHash(blockHeader.PrevBlock),
ChainWork: chainWork,
Winners: winners,
}
diff --git a/db/dbtypes/extraction.go b/db/dbtypes/extraction.go
index 506405824..4d9618134 100644
--- a/db/dbtypes/extraction.go
+++ b/db/dbtypes/extraction.go
@@ -51,7 +51,7 @@ func processTransactions(msgBlock *wire.MsgBlock, tree int8, chainParams *chainc
}
blockHeight := msgBlock.Header.Height
- blockHash := msgBlock.BlockHash()
+ blockHash := ChainHash(msgBlock.BlockHash())
blockTime := NewTimeDef(msgBlock.Header.Timestamp)
dbTransactions := make([]*Tx, 0, len(txs))
@@ -87,14 +87,13 @@ func processTransactions(msgBlock *wire.MsgBlock, tree int8, chainParams *chainc
}
fees := spent - sent
dbTx := &Tx{
- BlockHash: blockHash.String(),
+ BlockHash: blockHash,
BlockHeight: int64(blockHeight),
BlockTime: blockTime,
- Time: blockTime, // TODO, receive time? no! REMOVE
TxType: int16(txType),
Version: tx.Version,
Tree: tree,
- TxID: tx.CachedTxHash().String(),
+ TxID: ChainHash(*tx.CachedTxHash()),
BlockIndex: uint32(txIndex),
Locktime: tx.LockTime,
Expiry: tx.Expiry,
@@ -114,8 +113,7 @@ func processTransactions(msgBlock *wire.MsgBlock, tree int8, chainParams *chainc
dbTxVins[txIndex] = make(VinTxPropertyARRAY, 0, len(tx.TxIn))
for idx, txin := range tx.TxIn {
dbTxVins[txIndex] = append(dbTxVins[txIndex], VinTxProperty{
- PrevOut: txin.PreviousOutPoint.String(),
- PrevTxHash: txin.PreviousOutPoint.Hash.String(),
+ PrevTxHash: ChainHash(txin.PreviousOutPoint.Hash),
PrevTxIndex: txin.PreviousOutPoint.Index,
PrevTxTree: uint16(txin.PreviousOutPoint.Tree),
Sequence: txin.Sequence,
@@ -140,22 +138,20 @@ func processTransactions(msgBlock *wire.MsgBlock, tree int8, chainParams *chainc
//dbTx.Vouts = make([]*Vout, 0, len(tx.TxOut))
for io, txout := range tx.TxOut {
vout := Vout{
- TxHash: dbTx.TxID,
- TxIndex: uint32(io),
- TxTree: tree,
- TxType: dbTx.TxType,
- Value: uint64(txout.Value),
- Version: txout.Version,
- ScriptPubKey: txout.PkScript,
- Mixed: mixDenom > 0 && mixDenom == txout.Value, // later, check ticket and vote outputs against the spent outputs' mixed status
+ TxHash: dbTx.TxID,
+ TxIndex: uint32(io),
+ TxTree: tree,
+ TxType: dbTx.TxType,
+ Value: uint64(txout.Value),
+ Version: txout.Version,
+ Mixed: mixDenom > 0 && mixDenom == txout.Value, // later, check ticket and vote outputs against the spent outputs' mixed status
}
- scriptClass, scriptAddrs := stdscript.ExtractAddrs(vout.Version, vout.ScriptPubKey, chainParams)
- reqSigs := stdscript.DetermineRequiredSigs(vout.Version, vout.ScriptPubKey)
+ scriptClass, scriptAddrs := stdscript.ExtractAddrs(vout.Version, txout.PkScript, chainParams)
+ // reqSigs := stdscript.DetermineRequiredSigs(vout.Version, vout.ScriptPubKey)
addys := make([]string, 0, len(scriptAddrs))
for ia := range scriptAddrs {
addys = append(addys, scriptAddrs[ia].String())
}
- vout.ScriptPubKeyData.ReqSigs = uint32(reqSigs)
vout.ScriptPubKeyData.Type = NewScriptClass(scriptClass)
vout.ScriptPubKeyData.Addresses = addys
dbTxVouts[txIndex] = append(dbTxVouts[txIndex], &vout)
diff --git a/db/dbtypes/internal/arrays.go b/db/dbtypes/internal/arrays.go
index 804095498..be3f0c379 100644
--- a/db/dbtypes/internal/arrays.go
+++ b/db/dbtypes/internal/arrays.go
@@ -4,7 +4,9 @@ package internal
import (
"bytes"
+ "encoding/hex"
"fmt"
+ "strconv"
"strings"
)
@@ -129,3 +131,52 @@ func ScanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
}
return elems, err
}
+
+func ParseBytea(s []byte) ([]byte, error) {
+ if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
+ // bytea_output = hex
+ s = s[2:] // trim off leading "\\x"
+ result := make([]byte, hex.DecodedLen(len(s)))
+ _, err := hex.Decode(result, s)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+
+ // bytea_output = escape
+ var result []byte
+ for len(s) > 0 {
+ if s[0] == '\\' {
+ // escaped '\\'
+ if len(s) >= 2 && s[1] == '\\' {
+ result = append(result, '\\')
+ s = s[2:]
+ continue
+ }
+
+ // '\\' followed by an octal number
+ if len(s) < 4 {
+ return nil, fmt.Errorf("invalid bytea sequence %v", s)
+ }
+ r, err := strconv.ParseUint(string(s[1:4]), 8, 8)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
+ }
+ result = append(result, byte(r))
+ s = s[4:]
+ } else {
+ // We hit an unescaped, raw byte. Try to read in as many as
+ // possible in one go.
+ i := bytes.IndexByte(s, '\\')
+ if i == -1 {
+ result = append(result, s...)
+ break
+ }
+ result = append(result, s[:i]...)
+ s = s[i:]
+ }
+ }
+
+ return result, nil
+}
diff --git a/db/dbtypes/types.go b/db/dbtypes/types.go
index 46cac8636..c8c6fca04 100644
--- a/db/dbtypes/types.go
+++ b/db/dbtypes/types.go
@@ -4,10 +4,13 @@
package dbtypes
import (
+ "bytes"
"context"
"database/sql"
"database/sql/driver"
+ "encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"sort"
"strconv"
@@ -17,6 +20,7 @@ import (
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/dcrutil/v4"
"github.com/decred/dcrd/txscript/v4/stdscript"
+ "github.com/lib/pq"
"github.com/decred/dcrdata/v8/db/dbtypes/internal"
"github.com/decred/dcrdata/v8/txhelpers"
@@ -164,6 +168,210 @@ func (sc *ScriptClass) UnmarshalJSON(b []byte) error {
return nil
}
+type ChainHash chainhash.Hash
+
+func (ch ChainHash) String() string {
+ return chainhash.Hash(ch).String() // byte reverse and hex encode
+}
+
+func (ch *ChainHash) MarshalJSON() ([]byte, error) {
+ return json.Marshal(ch.String())
+}
+
+var zeroHash chainhash.Hash
+
+func (ch *ChainHash) IsZero() bool {
+ if ch == nil {
+ return true
+ }
+ return (*chainhash.Hash)(ch).IsEqual(&zeroHash)
+}
+
+func (ch *ChainHash) UnmarshalJSON(b []byte) error {
+ if len(b) < 2+64 {
+ return fmt.Errorf("wrong length")
+ }
+ if b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("not a quoted json string")
+ }
+ bs := b[1 : len(b)-1]
+ err := chainhash.Decode((*chainhash.Hash)(ch), string(bs))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func revHash(ch *ChainHash) {
+ sz := len(ch)
+ for i := range ch[:sz/2] {
+ ch[i], ch[sz-1-i] = ch[sz-1-i], ch[i]
+ }
+}
+
+// Scan satisfies the sql.Scanner interface.
+func (ch *ChainHash) Scan(src interface{}) error {
+ // switch src := src.(type) {
+ // case []byte:
+ // if len(src) != chainhash.HashSize {
+ // return fmt.Errorf("wrong length chain hash")
+ // }
+ // copy(ch[:], src)
+ // revHash(ch)
+ // return nil
+ // case nil:
+ // return nil
+ // }
+ // return fmt.Errorf("not a chain hash")
+
+ if src == nil { // scanned a NULL value
+ *ch = ChainHash{} // caller doesn't really know if it was null or zeros, context is important
+ return nil // leave *ch as the zero value
+ }
+ hb, ok := src.([]byte)
+ if !ok {
+ return fmt.Errorf("not a chain hash")
+ }
+ sz := len(hb)
+ if sz == 0 {
+ return nil // pretend this was NULL
+ }
+ if sz != chainhash.HashSize {
+ return fmt.Errorf("wrong length chain hash")
+ }
+ // copy(ch[:], hb)
+ // revHash(ch)
+ for i := range hb {
+ ch[sz-1-i], ch[i] = hb[i], hb[sz-1-i]
+ }
+ return nil
+}
+
+var _ sql.Scanner = (*ChainHash)(nil)
+
+// Scan satisfies the sql/driver.Valuer interface.
+func (ch ChainHash) Value() (driver.Value, error) {
+ revHash(&ch) // reverse in place since this is a value receiver
+ // fmt.Printf("%x\n", ch[:])
+ return ch[:], nil
+}
+
+var _ driver.Valuer = ChainHash{}
+var _ driver.Valuer = (*ChainHash)(nil)
+
+// ChainHashArray represents a one-dimensional array of the ChainHash type.
+type ChainHashArray []ChainHash
+
+// Scan implements the sql.Scanner interface.
+func (a *ChainHashArray) Scan(src interface{}) error {
+ var ba pq.ByteaArray
+ err := ba.Scan(src)
+ if err != nil {
+ return err
+ }
+
+ *a = make([]ChainHash, len(ba))
+ for i := range ba {
+ chi := &(*a)[i]
+ if err = chi.Scan(ba[i]); err != nil {
+ *a = ChainHashArray{}
+ return err
+ }
+ }
+ return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a ChainHashArray) Value() (driver.Value, error) {
+ if a == nil {
+ return nil, nil
+ }
+ if len(a) == 0 {
+ return "{}", nil
+ }
+
+ ba := make(pq.ByteaArray, 0, len(a))
+ for i := range a {
+ revB, _ := a[i].Value()
+ ba = append(ba, revB.([]byte))
+ }
+ return ba.Value()
+}
+
+// ChainHashArray2 represents a one-dimensional array of the ChainHash bytea type.
+type ChainHashArray2 []ChainHash
+
+// Scan implements the sql.Scanner interface.
+func (a *ChainHashArray2) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ return a.scanBytes(src)
+ case string:
+ return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
+ }
+
+ return fmt.Errorf("pq: cannot convert %T to ChainHashArray2", src)
+}
+
+func (a *ChainHashArray2) scanBytes(src []byte) error {
+ elems, err := internal.ScanLinearArray(src, []byte{','}, "ChainHashArray2")
+ if err != nil {
+ return err
+ }
+ if len(elems) == 0 && *a != nil {
+ *a = (*a)[:0] // truncate if it's a non-nil slice
+ return nil
+ }
+
+ b := make(ChainHashArray2, len(elems))
+ for i, s := range elems {
+ if len(s) < 2 || !bytes.Equal(s[:2], []byte("\\x")) {
+ return errors.New("scanBytes: invalid chain hash")
+ }
+ s = s[2:] // trim off leading "\x"
+ if err = b[i].Scan(s); err != nil {
+ return err
+ }
+ }
+ *a = b
+
+ return nil
+}
+
+// Value implements the driver.Valuer interface. It uses the "hex" format which
+// is only supported on PostgreSQL 9.0 or newer.
+func (a ChainHashArray2) Value() (driver.Value, error) {
+ if a == nil {
+ return nil, nil
+ }
+
+ n := len(a)
+ if n == 0 {
+ return "{}", nil
+ }
+
+ // There will be at least two curly brackets, 2*N bytes of quotes,
+ // 3*N bytes of hex formatting, and N-1 bytes of delimiters.
+ size := 1 + 6*n + len(a)*64
+ b := make([]byte, size)
+
+ for i, s := 0, b; i < n; i++ {
+ o := copy(s, `,"\\x`)
+ ar, _ := a[i].Value()
+ o += hex.Encode(s[o:], ar.([]byte))
+ s[o] = '"'
+ s = s[o+1:]
+ }
+
+ b[0] = '{' // overwrites a comma
+ b[size-1] = '}'
+
+ return string(b), nil
+}
+
// ErrorKind identifies a kind of error that can be used to define new errors
// via const SomeError = dbtypes.ErrorKind("something").
type ErrorKind string
@@ -705,6 +913,7 @@ func ChoiceIndexFromStr(choice string) (VoteChoice, error) {
}
// These are text keys used to identify different chart types.
+/*
const (
AvgBlockSize = "avg-block-size"
BlockChainSize = "blockchain-size"
@@ -723,6 +932,7 @@ const (
TicketPoolSize = "ticket-pool-size"
TicketPoolValue = "ticket-pool-value"
)
+*/
// MileStone defines the various stages passed by vote on a given agenda.
// Activated is the height at which the delay time begins before a vote activates.
@@ -752,12 +962,12 @@ type AgendaSummary struct {
// TreasurySpendVotes summarizes the vote tally for a tspend.
type TreasurySpendVotes struct {
- Hash string `json:"hash"`
- Expiry int64 `json:"expiry"`
- VoteStart int64 `json:"votestart"`
- VoteEnd int64 `json:"voteend"`
- YesVotes int64 `json:"yesvotes"`
- NoVotes int64 `json:"novotes"`
+ Hash ChainHash `json:"hash"`
+ Expiry int64 `json:"expiry"`
+ VoteStart int64 `json:"votestart"`
+ VoteEnd int64 `json:"voteend"`
+ YesVotes int64 `json:"yesvotes"`
+ NoVotes int64 `json:"novotes"`
}
// TreasurySpendMetaData extends TreasurySpendVotes and contains some
@@ -786,7 +996,7 @@ type BlockChainData struct {
Chain string
SyncHeight int64
BestHeight int64
- BestBlockHash string
+ BestBlockHash ChainHash
Difficulty uint32
VerificationProgress float64
ChainWork string
@@ -1001,13 +1211,12 @@ func (a UInt64Array) Value() (driver.Value, error) {
// Vout defines a transaction output
type Vout struct {
- TxHash string `json:"tx_hash"`
+ TxHash ChainHash `json:"tx_hash"`
TxIndex uint32 `json:"tx_index"`
TxTree int8 `json:"tx_tree"`
TxType int16 `json:"tx_type"`
Value uint64 `json:"value"`
Version uint16 `json:"version"`
- ScriptPubKey []byte `json:"pkScriptHex"`
ScriptPubKeyData ScriptPubKeyData `json:"pkScript"`
Mixed bool `json:"mixed"`
}
@@ -1023,7 +1232,7 @@ type UTXOData struct {
// UTXO represents a transaction output, but it is intended to help track
// unspent outputs.
type UTXO struct {
- TxHash string
+ TxHash ChainHash
TxIndex uint32
UTXOData
}
@@ -1034,10 +1243,10 @@ type AddressRow struct {
ValidMainChain bool
// MatchingTxHash provides the relationship between spending tx inputs and
// funding tx outputs.
- MatchingTxHash string
+ MatchingTxHash *ChainHash
IsFunding bool
TxBlockTime TimeDef
- TxHash string
+ TxHash ChainHash
TxVinVoutIndex uint32
Value uint64
VinVoutDbID uint64
@@ -1066,8 +1275,8 @@ func (ar *AddressRow) IsMerged() bool {
type AddressRowCompact struct {
Address string
TxBlockTime int64
- MatchingTxHash chainhash.Hash
- TxHash chainhash.Hash
+ TxHash ChainHash
+ MatchingTxHash *ChainHash
TxVinVoutIndex uint32
TxType int16
ValidMainChain bool
@@ -1086,7 +1295,7 @@ type AddressRowCompact struct {
type AddressRowMerged struct {
Address string
TxBlockTime int64
- TxHash chainhash.Hash
+ TxHash ChainHash
AtomsCredit uint64
AtomsDebit uint64
MergedCount int32
@@ -1183,7 +1392,7 @@ func CountCreditDebitRows(rows []*AddressRow) (numCredit, numDebit int) {
// address rows in a []*AddressRow.
func CountUnspentCreditRows(rows []*AddressRow) (numCredit, numDebit int) {
for _, r := range rows {
- if r.IsFunding && r.MatchingTxHash == "" {
+ if r.IsFunding && r.MatchingTxHash == nil {
numCredit++
}
}
@@ -1326,7 +1535,7 @@ func CountMergedRows(rows []*AddressRow, txnView AddrTxnViewType) (numMerged int
return 0, fmt.Errorf("MergedTxnCount: requested count for non-merged view")
}
- merged := make(map[string]struct{})
+ merged := make(map[ChainHash]struct{})
for _, r := range rows {
if r.MergedCount != 0 {
return 0, fmt.Errorf("CountMergedRows: merged row found in input; " +
@@ -1369,7 +1578,7 @@ func CountMergedRowsCompact(rows []*AddressRowCompact, txnView AddrTxnViewType)
return 0, fmt.Errorf("MergedTxnCount: requested count for non-merged view")
}
- merged := make(map[chainhash.Hash]struct{})
+ merged := make(map[ChainHash]struct{})
for _, row := range rows {
if wrongDirection(row.IsFunding) {
continue
@@ -1399,7 +1608,7 @@ func MergeRows(rows []*AddressRow) ([]*AddressRowMerged, error) {
// pre-allocate, since we have an idea of the ballpark size of the result,
// but try not to overshoot as space will be wasted.
numUniqueHashesGuess := len(rows)*2/3 + 1
- hashMap := make(map[chainhash.Hash]*AddressRowMerged, numUniqueHashesGuess)
+ hashMap := make(map[ChainHash]*AddressRowMerged, numUniqueHashesGuess)
mergedRows := make([]*AddressRowMerged, 0, numUniqueHashesGuess)
for _, r := range rows {
if r.MergedCount != 0 {
@@ -1407,19 +1616,13 @@ func MergeRows(rows []*AddressRow) ([]*AddressRowMerged, error) {
"only non-merged rows may be merged")
}
- Hash, err := chainhash.NewHashFromStr(r.TxHash)
- if err != nil {
- fmt.Printf("invalid address: %s", r.TxHash)
- continue
- }
-
// New transactions are started with MergedCount = 1.
- row := hashMap[*Hash]
+ row := hashMap[r.TxHash]
if row == nil {
mr := AddressRowMerged{
Address: r.Address,
TxBlockTime: r.TxBlockTime.T.Unix(),
- TxHash: *Hash,
+ TxHash: r.TxHash,
MergedCount: 1,
TxType: r.TxType,
ValidMainChain: r.ValidMainChain,
@@ -1431,7 +1634,7 @@ func MergeRows(rows []*AddressRow) ([]*AddressRowMerged, error) {
mr.AtomsDebit = r.Value
}
- hashMap[*Hash] = &mr
+ hashMap[r.TxHash] = &mr
mergedRows = append(mergedRows, &mr)
continue
}
@@ -1486,10 +1689,10 @@ func MergeRowsRange(rows []*AddressRow, N, offset int, txnView AddrTxnViewType)
// Skip over the first offset unique tx hashes.
var skipped int
- seen := make(map[chainhash.Hash]struct{}, offset)
+ seen := make(map[ChainHash]struct{}, offset)
// Output has at most N elements, each with a unique hash.
- hashMap := make(map[chainhash.Hash]*AddressRowMerged, N)
+ hashMap := make(map[ChainHash]*AddressRowMerged, N)
mergedRows := make([]*AddressRowMerged, 0, N)
for _, r := range rows {
if wrongDirection(r.IsFunding) {
@@ -1501,14 +1704,8 @@ func MergeRowsRange(rows []*AddressRow, N, offset int, txnView AddrTxnViewType)
"only non-merged rows may be merged")
}
- Hash, err := chainhash.NewHashFromStr(r.TxHash)
- if err != nil {
- fmt.Printf("invalid address: %s", r.TxHash)
- continue
- }
-
// New transactions are started with MergedCount = 1.
- row := hashMap[*Hash]
+ row := hashMap[r.TxHash]
if row == nil {
// Do not get beyond N merged rows, but continue looking for more
// data to merge.
@@ -1518,11 +1715,11 @@ func MergeRowsRange(rows []*AddressRow, N, offset int, txnView AddrTxnViewType)
// Skip over offset merged rows.
if skipped < offset {
- if _, found := seen[*Hash]; !found {
+ if _, found := seen[r.TxHash]; !found {
// This new hash would create a new merged row. Increment
// the skip counter and register this tx hash.
skipped++
- seen[*Hash] = struct{}{}
+ seen[r.TxHash] = struct{}{}
}
// Skip this merged row data.
continue
@@ -1531,7 +1728,7 @@ func MergeRowsRange(rows []*AddressRow, N, offset int, txnView AddrTxnViewType)
mr := AddressRowMerged{
Address: r.Address,
TxBlockTime: r.TxBlockTime.T.Unix(),
- TxHash: *Hash,
+ TxHash: r.TxHash,
MergedCount: 1,
TxType: r.TxType,
ValidMainChain: r.ValidMainChain,
@@ -1543,7 +1740,7 @@ func MergeRowsRange(rows []*AddressRow, N, offset int, txnView AddrTxnViewType)
mr.AtomsDebit = r.Value
}
- hashMap[*Hash] = &mr
+ hashMap[r.TxHash] = &mr
mergedRows = append(mergedRows, &mr)
continue
}
@@ -1572,7 +1769,7 @@ func MergeRowsCompact(rows []*AddressRowCompact) []*AddressRowMerged {
// pre-allocate, since we have an idea of the ballpark size of the result,
// but try not to overshoot as space will be wasted.
numUniqueHashesGuess := len(rows)*2/3 + 1
- hashMap := make(map[chainhash.Hash]*AddressRowMerged, numUniqueHashesGuess)
+ hashMap := make(map[ChainHash]*AddressRowMerged, numUniqueHashesGuess)
mergedRows := make([]*AddressRowMerged, 0, numUniqueHashesGuess)
for _, r := range rows {
// New transactions are started with MergedCount = 1.
@@ -1648,10 +1845,10 @@ func MergeRowsCompactRange(rows []*AddressRowCompact, N, offset int, txnView Add
// Skip over the first offset unique tx hashes.
var skipped int
- seen := make(map[chainhash.Hash]struct{}, offset)
+ seen := make(map[ChainHash]struct{}, offset)
// Output has at most N elements, each with a unique hash.
- hashMap := make(map[chainhash.Hash]*AddressRowMerged, N)
+ hashMap := make(map[ChainHash]*AddressRowMerged, N)
mergedRows := make([]*AddressRowMerged, 0, N)
for _, r := range rows {
if wrongDirection(r.IsFunding) {
@@ -1715,17 +1912,11 @@ func MergeRowsCompactRange(rows []*AddressRowCompact, N, offset int, txnView Add
func CompactRows(rows []*AddressRow) []*AddressRowCompact {
compact := make([]*AddressRowCompact, 0, len(rows))
for _, r := range rows {
- hash, err := chainhash.NewHashFromStr(r.TxHash)
- if err != nil {
- fmt.Println("Bad hash", r.TxHash)
- return nil
- }
- mhash, _ := chainhash.NewHashFromStr(r.MatchingTxHash) // zero array on error
compact = append(compact, &AddressRowCompact{
Address: r.Address,
TxBlockTime: r.TxBlockTime.UNIX(),
- MatchingTxHash: *mhash,
- TxHash: *hash,
+ MatchingTxHash: r.MatchingTxHash,
+ TxHash: r.TxHash,
TxVinVoutIndex: r.TxVinVoutIndex,
TxType: r.TxType,
ValidMainChain: r.ValidMainChain,
@@ -1745,18 +1936,13 @@ func UncompactRows(compact []*AddressRowCompact) []*AddressRow {
}
rows := make([]*AddressRow, 0, len(compact))
for _, r := range compact {
- // An unset matching hash is represented by the zero-value array.
- var matchingHash string
- if !txhelpers.IsZeroHash(r.MatchingTxHash) {
- matchingHash = r.MatchingTxHash.String()
- }
rows = append(rows, &AddressRow{
Address: r.Address,
ValidMainChain: r.ValidMainChain,
- MatchingTxHash: matchingHash,
+ MatchingTxHash: r.MatchingTxHash,
IsFunding: r.IsFunding,
TxBlockTime: NewTimeDefFromUNIX(r.TxBlockTime),
- TxHash: r.TxHash.String(),
+ TxHash: r.TxHash,
TxVinVoutIndex: r.TxVinVoutIndex,
Value: r.Value,
// VinVoutDbID unknown. Do not use.
@@ -1781,7 +1967,7 @@ func UncompactMergedRows(merged []*AddressRowMerged) []*AddressRow {
// no MatchingTxHash for merged
IsFunding: r.IsFunding(),
TxBlockTime: NewTimeDefFromUNIX(r.TxBlockTime),
- TxHash: r.TxHash.String(),
+ TxHash: r.TxHash,
// no TxVinVoutIndex for merged
Value: r.Value(),
// no VinVoutDbID for merged
@@ -1795,8 +1981,8 @@ func UncompactMergedRows(merged []*AddressRowMerged) []*AddressRow {
// AddressTxnOutput is a compact version of api/types.AddressTxnOutput.
type AddressTxnOutput struct {
Address string
- PkScript string
- TxHash chainhash.Hash
+ PkScript []byte
+ TxHash ChainHash
//BlockHash chainhash.Hash
Vout uint32
Height int32
@@ -1807,7 +1993,7 @@ type AddressTxnOutput struct {
// AddressMetrics defines address metrics needed to make decisions by which
// grouping buttons on the address history page charts should be disabled or
// enabled by default.
-type AddressMetrics struct {
+type AddressMetrics struct { // unused as of?
OldestBlockTime TimeDef
YearTxsCount int64 // number of year intervals with transactions
MonthTxsCount int64 // number of year month with transactions
@@ -1815,37 +2001,22 @@ type AddressMetrics struct {
DayTxsCount int64 // number of year day with transactions
}
-// ChartsData defines the fields that store the values needed to plot the charts
-// on the frontend.
+// ChartsData defines the fields that store the values needed to plot address
+// charts on the frontend.
type ChartsData struct {
- Difficulty []float64 `json:"difficulty,omitempty"`
Time []TimeDef `json:"time,omitempty"`
- Size []uint64 `json:"size,omitempty"`
- ChainSize []uint64 `json:"chainsize,omitempty"`
- Count []uint64 `json:"count,omitempty"`
- SizeF []float64 `json:"sizef,omitempty"`
- ValueF []float64 `json:"valuef,omitempty"`
- Unspent []uint64 `json:"unspent,omitempty"`
- Revoked []uint64 `json:"revoked,omitempty"`
- Height []uint64 `json:"height,omitempty"`
- Pooled []uint64 `json:"pooled,omitempty"`
- Solo []uint64 `json:"solo,omitempty"`
SentRtx []uint64 `json:"sentRtx,omitempty"`
ReceivedRtx []uint64 `json:"receivedRtx,omitempty"`
- Tickets []uint64 `json:"tickets,omitempty"`
- Votes []uint64 `json:"votes,omitempty"`
- RevokeTx []uint64 `json:"revokeTx,omitempty"`
- Amount []float64 `json:"amount,omitempty"`
+ Tickets []uint32 `json:"tickets,omitempty"`
+ Votes []uint32 `json:"votes,omitempty"`
+ RevokeTx []uint32 `json:"revokeTx,omitempty"`
Received []float64 `json:"received,omitempty"`
Sent []float64 `json:"sent,omitempty"`
Net []float64 `json:"net,omitempty"`
- ChainWork []uint64 `json:"chainwork,omitempty"`
- NetHash []uint64 `json:"nethash,omitempty"`
}
// ScriptPubKeyData is part of the result of decodescript(ScriptPubKeyHex)
type ScriptPubKeyData struct {
- ReqSigs uint32 `json:"reqSigs"`
Type ScriptClass `json:"type"` // marshals to string
Addresses []string `json:"addresses"`
// NOTE: Script version is in Vout struct.
@@ -1853,22 +2024,21 @@ type ScriptPubKeyData struct {
// VinTxProperty models a transaction input with previous outpoint information.
type VinTxProperty struct {
- PrevOut string `json:"prevout"`
- PrevTxHash string `json:"prevtxhash"`
- PrevTxIndex uint32 `json:"prevvoutidx"`
- PrevTxTree uint16 `json:"tree"`
- Sequence uint32 `json:"sequence"`
- ValueIn int64 `json:"amountin"`
- TxID string `json:"tx_hash"`
- TxIndex uint32 `json:"tx_index"`
- TxTree uint16 `json:"tx_tree"`
- TxType int16 `json:"tx_type"`
- BlockHeight uint32 `json:"blockheight"`
- BlockIndex uint32 `json:"blockindex"`
- ScriptSig []byte `json:"scriptSig"`
- IsValid bool `json:"is_valid"`
- IsMainchain bool `json:"is_mainchain"`
- Time TimeDef `json:"time"`
+ PrevTxHash ChainHash `json:"prevtxhash"`
+ PrevTxIndex uint32 `json:"prevvoutidx"`
+ PrevTxTree uint16 `json:"tree"`
+ Sequence uint32 `json:"sequence"`
+ ValueIn int64 `json:"amountin"`
+ TxID ChainHash `json:"tx_hash"`
+ TxIndex uint32 `json:"tx_index"`
+ TxTree uint16 `json:"tx_tree"`
+ TxType int16 `json:"tx_type"`
+ BlockHeight uint32 `json:"blockheight"`
+ BlockIndex uint32 `json:"blockindex"`
+ ScriptSig []byte `json:"scriptSig"`
+ IsValid bool `json:"is_valid"`
+ IsMainchain bool `json:"is_mainchain"`
+ Time TimeDef `json:"time"`
}
// PoolTicketsData defines the real time data
@@ -1884,18 +2054,20 @@ type PoolTicketsData struct {
}
// Vin models a transaction input.
+/* unused
type Vin struct {
//txDbID int64
- Coinbase string `json:"coinbase"`
- TxHash string `json:"txhash"`
- VoutIdx uint32 `json:"voutidx"`
- Tree int8 `json:"tree"`
- Sequence uint32 `json:"sequence"`
- AmountIn float64 `json:"amountin"`
- BlockHeight uint32 `json:"blockheight"`
- BlockIndex uint32 `json:"blockindex"`
- ScriptHex string `json:"scripthex"`
-}
+ Coinbase string `json:"coinbase"`
+ TxHash ChainHash `json:"txhash"`
+ VoutIdx uint32 `json:"voutidx"`
+ Tree int8 `json:"tree"`
+ Sequence uint32 `json:"sequence"`
+ AmountIn float64 `json:"amountin"`
+ BlockHeight uint32 `json:"blockheight"`
+ BlockIndex uint32 `json:"blockindex"`
+ ScriptHex string `json:"scripthex"`
+}
+*/
// ScriptSig models the signature script used to redeem the origin transaction
// as a JSON object (non-coinbase txns only)
@@ -1920,29 +2092,27 @@ type AgendaVoteChoices struct {
// Tx models a Decred transaction. It is stored in a Block.
type Tx struct {
//blockDbID int64
- BlockHash string `json:"block_hash"`
- BlockHeight int64 `json:"block_height"`
- BlockTime TimeDef `json:"block_time"`
- Time TimeDef `json:"time"` // REMOVE!
- TxType int16 `json:"tx_type"`
- Version uint16 `json:"version"`
- Tree int8 `json:"tree"`
- TxID string `json:"txid"`
- BlockIndex uint32 `json:"block_index"`
- Locktime uint32 `json:"locktime"`
- Expiry uint32 `json:"expiry"`
- Size uint32 `json:"size"`
- Spent int64 `json:"spent"`
- Sent int64 `json:"sent"`
- Fees int64 `json:"fees"`
- MixCount int32 `json:"mix_count"`
- MixDenom int64 `json:"mix_denom"`
- NumVin uint32 `json:"numvin"`
- //Vins VinTxPropertyARRAY `json:"vins"`
- VinDbIds []uint64 `json:"vindbids"`
- NumVout uint32 `json:"numvout"`
- Vouts []*Vout `json:"vouts"`
- VoutDbIds []uint64 `json:"voutdbids"`
+ BlockHash ChainHash `json:"block_hash"`
+ BlockHeight int64 `json:"block_height"`
+ BlockTime TimeDef `json:"block_time"`
+ TxType int16 `json:"tx_type"`
+ Version uint16 `json:"version"`
+ Tree int8 `json:"tree"`
+ TxID ChainHash `json:"txid"`
+ BlockIndex uint32 `json:"block_index"`
+ Locktime uint32 `json:"locktime"`
+ Expiry uint32 `json:"expiry"`
+ Size uint32 `json:"size"`
+ Spent int64 `json:"spent"`
+ Sent int64 `json:"sent"`
+ Fees int64 `json:"fees"`
+ MixCount int32 `json:"mix_count"`
+ MixDenom int64 `json:"mix_denom"`
+ NumVin uint32 `json:"numvin"`
+ VinDbIds []uint64 `json:"vindbids"`
+ NumVout uint32 `json:"numvout"`
+ Vouts []*Vout `json:"vouts"`
+ VoutDbIds []uint64 `json:"voutdbids"`
// NOTE: VoutDbIds may not be needed if there is a vout table since each
// vout will have a tx_dbid
IsValid bool `json:"valid"`
@@ -1951,62 +2121,60 @@ type Tx struct {
// Block models a Decred block.
type Block struct {
- Hash string `json:"hash"`
- Size uint32 `json:"size"`
- Height uint32 `json:"height"`
- Version uint32 `json:"version"`
+ Hash ChainHash `json:"hash"`
+ Size uint32 `json:"size"`
+ Height uint32 `json:"height"`
+ Version uint32 `json:"version"`
NumTx uint32
NumRegTx uint32
- Tx []string `json:"tx"`
TxDbIDs []uint64
NumStakeTx uint32
- STx []string `json:"stx"`
STxDbIDs []uint64
- Time TimeDef `json:"time"`
- Nonce uint64 `json:"nonce"`
- VoteBits uint16 `json:"votebits"`
- Voters uint16 `json:"voters"`
- FreshStake uint8 `json:"freshstake"`
- Revocations uint8 `json:"revocations"`
- PoolSize uint32 `json:"poolsize"`
- Bits uint32 `json:"bits"`
- SBits uint64 `json:"sbits"`
- Difficulty float64 `json:"difficulty"`
- StakeVersion uint32 `json:"stakeversion"`
- PreviousHash string `json:"previousblockhash"`
- ChainWork string `json:"chainwork"`
- Winners []string `json:"winners"`
+ Time TimeDef `json:"time"`
+ Nonce uint64 `json:"nonce"`
+ VoteBits uint16 `json:"votebits"`
+ Voters uint16 `json:"voters"`
+ FreshStake uint8 `json:"freshstake"`
+ Revocations uint8 `json:"revocations"`
+ PoolSize uint32 `json:"poolsize"`
+ Bits uint32 `json:"bits"`
+ SBits uint64 `json:"sbits"`
+ Difficulty float64 `json:"difficulty"`
+ StakeVersion uint32 `json:"stakeversion"`
+ PreviousHash ChainHash `json:"previousblockhash"`
+ ChainWork string `json:"chainwork"`
+ Winners []ChainHash `json:"winners"`
}
type BlockDataBasic struct {
- Height uint32 `json:"height,omitempty"`
- Size uint32 `json:"size,omitempty"`
- Hash string `json:"hash,omitempty"`
- Difficulty float64 `json:"diff,omitempty"`
- StakeDiff float64 `json:"sdiff,omitempty"`
- Time TimeDef `json:"time,omitempty"`
- NumTx uint32 `json:"txlength,omitempty"`
+ Height uint32 `json:"height,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+ Hash ChainHash `json:"hash,omitempty"`
+ Difficulty float64 `json:"diff,omitempty"`
+ StakeDiff float64 `json:"sdiff,omitempty"`
+ Time TimeDef `json:"time,omitempty"`
+ NumTx uint32 `json:"txlength,omitempty"`
}
// BlockStatus describes a block's status in the block chain.
type BlockStatus struct {
- IsValid bool `json:"is_valid"`
- IsMainchain bool `json:"is_mainchain"`
- Height uint32 `json:"height"`
- PrevHash string `json:"previous_hash"`
- Hash string `json:"hash"`
- NextHash string `json:"next_hash"`
+ IsValid bool `json:"is_valid"`
+ IsMainchain bool `json:"is_mainchain"`
+ Height uint32 `json:"height"`
+ PrevHash ChainHash `json:"previous_hash"`
+ Hash ChainHash `json:"hash"`
+ NextHash ChainHash `json:"next_hash"`
}
// SideChain represents blocks of a side chain, in ascending height order.
type SideChain struct {
- Hashes []string
+ Hashes []chainhash.Hash
Heights []int64
}
// AddressTx models data for transactions on the address page.
type AddressTx struct {
- TxID string
+ TxID ChainHash
TxType string
InOutID uint32
Size uint32
@@ -2017,29 +2185,12 @@ type AddressTx struct {
ReceivedTotal float64
SentTotal float64
IsFunding bool
- MatchedTx string
+ MatchedTx *ChainHash
MatchedTxIndex uint32
MergedTxnCount uint64 `json:",omitempty"`
BlockHeight uint32
}
-// IOID formats an identification string for the transaction input (or output)
-// represented by the AddressTx.
-func (a *AddressTx) IOID(txType ...string) string {
- // If transaction is of type merged_debit, return unformatted transaction ID
- if len(txType) > 0 && AddrTxnViewTypeFromStr(txType[0]) == AddrMergedTxnDebit {
- return a.TxID
- }
- // When AddressTx is used properly, at least one of ReceivedTotal or
- // SentTotal should be zero.
- if a.IsFunding {
- // An outpoint receiving funds
- return fmt.Sprintf("%s:out[%d]", a.TxID, a.InOutID)
- }
- // A transaction input referencing an outpoint being spent
- return fmt.Sprintf("%s:in[%d]", a.TxID, a.InOutID)
-}
-
// Link formats a link for the transaction, with vin/vout index if the AddressTx
// is not merged.
func (a *AddressTx) Link() string {
@@ -2054,10 +2205,10 @@ func (a *AddressTx) Link() string {
}
type TreasuryTx struct {
- TxID string
+ TxID ChainHash
Type int
Amount int64
- BlockHash string
+ BlockHash ChainHash
BlockHeight int64
BlockTime TimeDef
}
diff --git a/db/dbtypes/types_test.go b/db/dbtypes/types_test.go
index 43d1f9331..f3b6fc4d2 100644
--- a/db/dbtypes/types_test.go
+++ b/db/dbtypes/types_test.go
@@ -154,3 +154,31 @@ func TestTimeDef_Scan(t *testing.T) {
t.Fatal("TimeDef.Scan(int64) should have failed")
}
}
+
+func TestChainHashArray2_Value(t *testing.T) {
+ tests := []struct {
+ name string
+ a ChainHashArray2
+ want string
+ wantErr bool
+ }{
+ {
+ "ok",
+ ChainHashArray2{ChainHash{1, 2, 3}, ChainHash{4, 5, 6}},
+ `{"\\x0000000000000000000000000000000000000000000000000000000000030201","\\x0000000000000000000000000000000000000000000000000000000000060504"}`,
+ false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.a.Value()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ChainHashArray2.Value() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("Want = \"%s\", got = \"%s\"", tt.want, got)
+ }
+ })
+ }
+}
diff --git a/db/dcrpg/go.mod b/db/dcrpg/go.mod
index 04d0ce641..eef50aca7 100644
--- a/db/dcrpg/go.mod
+++ b/db/dcrpg/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/db/dcrpg/v8
-go 1.18
+go 1.21
replace github.com/decred/dcrdata/v8 => ../../
@@ -21,7 +21,7 @@ require (
github.com/dustin/go-humanize v1.0.1
github.com/jessevdk/go-flags v1.5.0
github.com/jrick/logrotate v1.0.0
- github.com/lib/pq v1.10.4
+ github.com/lib/pq v1.10.9
)
require (
diff --git a/db/dcrpg/go.sum b/db/dcrpg/go.sum
index 8b7950486..14d06a3a9 100644
--- a/db/dcrpg/go.sum
+++ b/db/dcrpg/go.sum
@@ -96,6 +96,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
@@ -110,8 +111,8 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -145,6 +146,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -190,6 +192,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -225,6 +228,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/db/dcrpg/indexing.go b/db/dcrpg/indexing.go
index b36571279..27b5e3edc 100644
--- a/db/dcrpg/indexing.go
+++ b/db/dcrpg/indexing.go
@@ -374,24 +374,24 @@ func DeindexSwapsTableOnHeight(db *sql.DB) (err error) {
// Delete duplicates
-func (pgb *ChainDB) DeleteDuplicateVins() (int64, error) {
- return DeleteDuplicateVins(pgb.db)
+func (pgb *ChainDB) deleteDuplicateVins() (int64, error) {
+ return deleteDuplicateVins(pgb.db)
}
-func (pgb *ChainDB) DeleteDuplicateVouts() (int64, error) {
- return DeleteDuplicateVouts(pgb.db)
+func (pgb *ChainDB) deleteDuplicateVouts() (int64, error) {
+ return deleteDuplicateVouts(pgb.db)
}
-func (pgb *ChainDB) DeleteDuplicateTxns() (int64, error) {
- return DeleteDuplicateTxns(pgb.db)
+func (pgb *ChainDB) deleteDuplicateTxns() (int64, error) {
+ return deleteDuplicateTxns(pgb.db)
}
-func (pgb *ChainDB) DeleteDuplicateAgendas() (int64, error) {
- return DeleteDuplicateAgendas(pgb.db)
+func (pgb *ChainDB) deleteDuplicateAgendas() (int64, error) {
+ return deleteDuplicateAgendas(pgb.db)
}
-func (pgb *ChainDB) DeleteDuplicateAgendaVotes() (int64, error) {
- return DeleteDuplicateAgendaVotes(pgb.db)
+func (pgb *ChainDB) deleteDuplicateAgendaVotes() (int64, error) {
+ return deleteDuplicateAgendaVotes(pgb.db)
}
// Indexes checks
diff --git a/db/dcrpg/insightapi.go b/db/dcrpg/insightapi.go
index 16cfcb699..e2784feab 100644
--- a/db/dcrpg/insightapi.go
+++ b/db/dcrpg/insightapi.go
@@ -6,7 +6,7 @@ package dcrpg
import (
"context"
- "sort"
+ "encoding/hex"
"time"
"github.com/decred/dcrd/chaincfg/chainhash"
@@ -62,20 +62,6 @@ func (pgb *ChainDB) SendRawTransaction(txhex string) (string, error) {
return hash.String(), err
}
-type txSortable struct {
- Hash chainhash.Hash
- Time int64
-}
-
-func sortTxsByTimeAndHash(txns []txSortable) {
- sort.Slice(txns, func(i, j int) bool {
- if txns[i].Time == txns[j].Time {
- return txns[i].Hash.String() < txns[j].Hash.String()
- }
- return txns[i].Time > txns[j].Time
- })
-}
-
// InsightAddressTransactions performs DB queries to get all transaction hashes
// for the specified addresses in descending order by time, then ascending order
// by hash. It also returns a list of recently (defined as greater than
@@ -98,7 +84,7 @@ func (pgb *ChainDB) InsightAddressTransactions(addr []string, recentBlockHeight
}
for _, r := range rows {
//txns = append(txns, txSortable{r.TxHash, r.TxBlockTime})
- txns = append(txns, r.TxHash)
+ txns = append(txns, chainhash.Hash(r.TxHash))
// Count the number of "recent" txns.
if r.TxBlockTime > recentBlocktime {
numRecent++
@@ -122,13 +108,17 @@ func (pgb *ChainDB) InsightAddressTransactions(addr []string, recentBlockHeight
return
}
-// AddressIDsByOutpoint fetches all address row IDs for a given outpoint
+// OutpointAddresses fetches all addresses and the value of a given outpoint
// (txHash:voutIndex).
-func (pgb *ChainDB) AddressIDsByOutpoint(txHash string, voutIndex uint32) ([]uint64, []string, int64, error) {
+func (pgb *ChainDB) OutpointAddresses(txHash string, voutIndex uint32) ([]string, int64, error) {
+ ch, err := chainHashFromStr(txHash)
+ if err != nil {
+ return nil, 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- ids, addrs, val, err := RetrieveAddressIDsByOutpoint(ctx, pgb.db, txHash, voutIndex)
- return ids, addrs, val, pgb.replaceCancelError(err)
+ _, addrs, val, err := retrieveAddressIDsByOutpoint(ctx, pgb.db, ch, voutIndex)
+ return addrs, val, pgb.replaceCancelError(err)
}
// GetTransactionHex returns the full serialized transaction for the specified
@@ -156,7 +146,7 @@ func (pgb *ChainDB) GetBlockVerboseByHash(hash string, verboseTx bool) *chainjso
return nil
}
- blockVerbose, err := pgb.Client.GetBlockVerbose(context.TODO(), blockhash, verboseTx)
+ blockVerbose, err := pgb.Client.GetBlockVerbose(pgb.ctx, blockhash, verboseTx)
if err != nil {
log.Errorf("GetBlockVerbose(%v) failed: %v", hash, err)
return nil
@@ -188,12 +178,12 @@ func makeBlockTransactions(blockVerbose *chainjson.GetBlockVerboseResult) *apity
func (pgb *ChainDB) GetBlockHash(idx int64) (string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- hash, err := RetrieveBlockHash(ctx, pgb.db, idx)
+ hash, err := retrieveBlockHash(ctx, pgb.db, idx)
if err != nil {
log.Errorf("Unable to get block hash for block number %d: %v", idx, err)
return "", pgb.replaceCancelError(err)
}
- return hash, nil
+ return hash.String(), nil
}
// BlockSummaryTimeRange returns the blocks created within a specified time
@@ -201,7 +191,7 @@ func (pgb *ChainDB) GetBlockHash(idx int64) (string, error) {
func (pgb *ChainDB) BlockSummaryTimeRange(min, max int64, limit int) ([]dbtypes.BlockDataBasic, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- blockSummary, err := RetrieveBlockSummaryByTimeRange(ctx, pgb.db, min, max, limit)
+ blockSummary, err := retrieveBlockSummaryByTimeRange(ctx, pgb.db, min, max, limit)
return blockSummary, pgb.replaceCancelError(err)
}
@@ -259,10 +249,20 @@ func (pgb *ChainDB) AddressUTXO(address string) ([]*dbtypes.AddressTxnOutput, bo
// Query the DB for the current UTXO set for this address.
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- txnOutputs, err := RetrieveAddressDbUTXOs(ctx, pgb.db, address)
+ txnOutputs, err := retrieveAddressDbUTXOs(ctx, pgb.db, address)
if err != nil {
return nil, false, pgb.replaceCancelError(err)
}
+ // Get the pkscripts that db doesn't have.
+ for _, out := range txnOutputs {
+ txOutRes, err := pgb.Client.GetTxOut(pgb.ctx, (*chainhash.Hash)(&out.TxHash), out.Vout, 0, false)
+ if err != nil {
+ log.Warnf("could not get tx out (%v:%d): %v", out.TxHash, out.Vout, err)
+ continue
+ }
+
+ out.PkScript, _ = hex.DecodeString(txOutRes.ScriptPubKey.Hex)
+ }
// Update the address cache.
cacheUpdated := pgb.AddressCache.StoreUTXOs(address, txnOutputs,
@@ -273,9 +273,13 @@ func (pgb *ChainDB) AddressUTXO(address string) ([]*dbtypes.AddressTxnOutput, bo
// SpendDetailsForFundingTx will return the details of any spending transactions
// (tx, index, block height) for a given funding transaction.
func (pgb *ChainDB) SpendDetailsForFundingTx(fundHash string) ([]*apitypes.SpendByFundingHash, error) {
+ ch, err := chainHashFromStr(fundHash)
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- addrRow, err := RetrieveSpendingTxsByFundingTxWithBlockHeight(ctx, pgb.db, fundHash)
+ addrRow, err := retrieveSpendingTxsByFundingTxWithBlockHeight(ctx, pgb.db, ch)
if err != nil {
return nil, pgb.replaceCancelError(err)
}
diff --git a/db/dcrpg/insightapi_test.go b/db/dcrpg/insightapi_test.go
index 22629856e..3ef496c42 100644
--- a/db/dcrpg/insightapi_test.go
+++ b/db/dcrpg/insightapi_test.go
@@ -5,11 +5,26 @@ package dcrpg
import (
"reflect"
+ "sort"
"testing"
"github.com/decred/dcrd/chaincfg/chainhash"
)
+type txSortable struct {
+ Hash chainhash.Hash
+ Time int64
+}
+
+func sortTxsByTimeAndHash(txns []txSortable) {
+ sort.Slice(txns, func(i, j int) bool {
+ if txns[i].Time == txns[j].Time {
+ return txns[i].Hash.String() < txns[j].Hash.String()
+ }
+ return txns[i].Time > txns[j].Time
+ })
+}
+
func Test_sortTxsByTimeAndHash(t *testing.T) {
h0, _ := chainhash.NewHashFromStr("79936a1fb658ba249443f0caf4a6a44ce73afe16d543d4f7b8dcf847dfb21a9d")
h1, _ := chainhash.NewHashFromStr("484e1a03f7c3795b468d7d46071e73e207aec8295917dec569a76cca981f1b95")
diff --git a/db/dcrpg/internal/addrstmts.go b/db/dcrpg/internal/addrstmts.go
index a40535a43..196563b7b 100644
--- a/db/dcrpg/internal/addrstmts.go
+++ b/db/dcrpg/internal/addrstmts.go
@@ -4,16 +4,24 @@ import "fmt"
// These queries relate primarily to the "addresses" table.
const (
+ // instead of this addresses table, maybe:
+ // - address index table: id, address
+ // - address_transactions table: address_id, tx_db_id, vin_vout_db_id, is_funding, matching_tx_db_id
+ // get valid_mainchain, tx_hash(es), time, from transactions table
+ // get value from vouts/vins table
+ //
+ // or perhaps two tables: address_credits, address_debits
+
CreateAddressTable = `CREATE TABLE IF NOT EXISTS addresses (
id SERIAL8 PRIMARY KEY,
address TEXT,
- tx_hash TEXT,
+ tx_hash BYTEA,
valid_mainchain BOOLEAN,
- matching_tx_hash TEXT,
+ matching_tx_hash BYTEA, -- the funder if is_funding is FALSE, otherwise any known spender (may be NULL)
value INT8,
- block_time TIMESTAMPTZ NOT NULL,
+ block_time TIMESTAMPTZ NOT NULL, -- ugh, so much dup
is_funding BOOLEAN,
- tx_vin_vout_index INT4,
+ tx_vin_vout_index INT4, -- vout if is_funding is TRUE, vin if FALSE
tx_vin_vout_row_id INT8,
tx_type INT4
);`
@@ -91,11 +99,11 @@ const (
LIMIT $2 OFFSET $3`
// need random table name? does lib/pq share sessions?
- CreateTempAddrTxnsTable = `CREATE TEMPORARY TABLE address_transactions
- ON COMMIT DROP -- do in a txn!
- AS (` + addressTxnsSubQuery + `);`
+ // CreateTempAddrTxnsTable = `CREATE TEMPORARY TABLE address_transactions
+ // ON COMMIT DROP -- do in a txn!
+ // AS (` + addressTxnsSubQuery + `);`
- SelectVinsForAddress0 = `SELECT vins.tx_hash, vins.tx_index, vins.prev_tx_hash, vins.prev_tx_index,
+ SelectVinsForAddressAlt = `SELECT vins.tx_hash, vins.tx_index, vins.prev_tx_hash, vins.prev_tx_index,
vins.prev_tx_tree, vins.value_in -- no block height or block index
FROM (` + addressTxnsSubQuery + `) atxs
-- JOIN transactions txs ON txs.tx_hash=atxs.tx_hash
@@ -108,7 +116,7 @@ const (
JOIN vins ON vins.tx_hash = atxs.tx_hash -- JOIN vins on vins.id = any(txs.vin_db_ids)
LEFT JOIN transactions prevtxs ON vins.prev_tx_hash=prevtxs.tx_hash;` // LEFT JOIN because prev_tx_hash may be coinbase
- SelectVoutsForAddress = `SELECT vouts.value, vouts.tx_hash, vouts.tx_index, vouts.version, vouts.pkscript
+ SelectVoutsForAddress = `SELECT vouts.value, vouts.tx_hash, vouts.tx_index, vouts.version
FROM (` + addressTxnsSubQuery + `) atxs
JOIN vouts ON vouts.tx_hash = atxs.tx_hash;` // -- vouts.id = any(transactions.vout_db_ids)
@@ -138,6 +146,7 @@ const (
addrsColumnNames = `id, address, matching_tx_hash, tx_hash, tx_type, valid_mainchain,
tx_vin_vout_index, block_time, tx_vin_vout_row_id, value, is_funding`
+ /* unused
SelectAddressAllByAddress = `SELECT ` + addrsColumnNames + ` FROM addresses
WHERE address=$1
ORDER BY block_time DESC, tx_hash ASC;`
@@ -162,16 +171,19 @@ const (
FROM addresses
WHERE address = ANY($1) AND valid_mainchain
ORDER BY block_time DESC, tx_hash ASC;`
+ */
// selectAddressTimeGroupingCount return the count of record groups,
// where grouping is done by a specified time interval, for an addresses.
- selectAddressTimeGroupingCount = `SELECT COUNT(DISTINCT %s) FROM addresses WHERE address=$1;`
+ // selectAddressTimeGroupingCount = `SELECT COUNT(DISTINCT %s) FROM addresses WHERE address=$1;`
+ /* unused
SelectAddressUnspentCountANDValue = `SELECT COUNT(*), SUM(value) FROM addresses
WHERE address = $1 AND is_funding = TRUE AND matching_tx_hash = '' AND valid_mainchain;`
SelectAddressSpentCountANDValue = `SELECT COUNT(*), SUM(value) FROM addresses
WHERE address = $1 AND is_funding = FALSE AND matching_tx_hash != '' AND valid_mainchain;`
+ */
SelectAddressesMergedSpentCount = `SELECT COUNT( DISTINCT tx_hash ) FROM addresses
WHERE address = $1 AND is_funding = FALSE AND valid_mainchain;`
@@ -208,12 +220,12 @@ const (
COUNT(*),
SUM(value),
is_funding,
- (matching_tx_hash = '') AS all_empty_matching
- -- NOT BOOL_AND(matching_tx_hash = '') AS no_empty_matching
+ (matching_tx_hash IS NULL) AS all_empty_matching
+ -- NOT BOOL_AND(matching_tx_hash IS NULL) AS no_empty_matching
FROM addresses
WHERE address = $1 AND valid_mainchain
GROUP BY tx_type=0, is_funding,
- matching_tx_hash='' -- separate spent and unspent
+ matching_tx_hash IS NULL -- separate spent and unspent
ORDER BY count, is_funding;`
SelectAddressUnspentWithTxn = `SELECT
@@ -222,13 +234,12 @@ const (
addresses.value,
transactions.block_height,
addresses.block_time,
- addresses.tx_vin_vout_index,
- vouts.pkscript
+ addresses.tx_vin_vout_index
FROM addresses
JOIN transactions ON
addresses.tx_hash = transactions.tx_hash
JOIN vouts ON addresses.tx_vin_vout_row_id = vouts.id
- WHERE addresses.address=$1 AND addresses.is_funding AND addresses.matching_tx_hash = '' AND valid_mainchain
+ WHERE addresses.address=$1 AND addresses.is_funding AND addresses.matching_tx_hash IS NULL AND valid_mainchain
ORDER BY addresses.block_time DESC;`
// Since tx_vin_vout_row_id is the vouts table primary key (id) when
// is_funding=true, there is no need to join vouts on tx_hash and tx_index.
@@ -245,6 +256,7 @@ const (
// SELECT * FROM these
// ORDER BY block_time DESC LIMIT $2 OFFSET $3;`
+ /* unused
SelectAddressMergedDebitView = `SELECT tx_hash, valid_mainchain, block_time, sum(value), COUNT(*)
FROM addresses
WHERE address=$1 AND is_funding = FALSE -- spending transactions
@@ -256,6 +268,7 @@ const (
WHERE address=$1 AND is_funding = TRUE -- funding transactions
GROUP BY (tx_hash, valid_mainchain, block_time) -- merging common transactions in same valid mainchain block
ORDER BY block_time DESC LIMIT $2 OFFSET $3;`
+ */
SelectAddressMergedViewAll = `SELECT tx_hash, valid_mainchain, block_time, sum(CASE WHEN is_funding = TRUE THEN value ELSE 0 END),
sum(CASE WHEN is_funding = FALSE THEN value ELSE 0 END), COUNT(*)
@@ -266,9 +279,10 @@ const (
SelectAddressMergedView = SelectAddressMergedViewAll + ` LIMIT $2 OFFSET $3;`
- SelectAddressCsvView = "SELECT tx_hash, valid_mainchain, matching_tx_hash, value, block_time, is_funding, " +
- "tx_vin_vout_index, tx_type FROM addresses WHERE address=$1 ORDER BY block_time DESC"
+ // SelectAddressCsvView = "SELECT tx_hash, valid_mainchain, matching_tx_hash, value, block_time, is_funding, " +
+ // "tx_vin_vout_index, tx_type FROM addresses WHERE address=$1 ORDER BY block_time DESC"
+ /* unused
SelectAddressDebitsLimitNByAddress = `SELECT ` + addrsColumnNames + `
FROM addresses WHERE address=$1 AND is_funding = FALSE AND valid_mainchain
ORDER BY block_time DESC, tx_hash ASC
@@ -278,14 +292,15 @@ const (
FROM addresses WHERE address=$1 AND is_funding AND valid_mainchain
ORDER BY block_time DESC, tx_hash ASC
LIMIT $2 OFFSET $3;`
+ */
SelectAddressIDsByFundingOutpoint = `SELECT id, address, value
FROM addresses
WHERE tx_hash=$1 AND tx_vin_vout_index=$2 AND is_funding
ORDER BY block_time DESC;`
- SelectAddressOldestTxBlockTime = `SELECT block_time FROM addresses WHERE
- address=$1 ORDER BY block_time LIMIT 1;`
+ // SelectAddressOldestTxBlockTime = `SELECT block_time FROM addresses WHERE
+ // address=$1 ORDER BY block_time LIMIT 1;`
// selectAddressTxTypesByAddress gets the transaction type histogram for the
// given address using block time binning with bin size of block_time.
@@ -319,6 +334,15 @@ const (
AND vouts.tx_index=addresses.tx_vin_vout_index
AND transactions.id=vouts.spend_tx_row_id;`
+ UpdateAllAddressesMatchingTxHashRangeXX = `UPDATE addresses SET matching_tx_hash=vins.tx_hash
+ FROM vins, transactions
+ WHERE transactions.block_height >= $1 AND transactions.block_height < $2
+ AND addresses.is_funding AND addresses.value > 0
+ AND vins.prev_tx_hash=addresses.tx_hash
+ AND vins.prev_tx_index=addresses.tx_vin_vout_index
+ AND transactions.tx_hash=vins.tx_hash;`
+
+ /* alts
UpdateAllAddressesMatchingTxHash = `UPDATE addresses SET matching_tx_hash=transactions.tx_hash
FROM vouts, transactions
WHERE vouts.value>0 AND addresses.is_funding
@@ -346,6 +370,7 @@ const (
AS stuff
WHERE addresses.id=stuff.addr_id
AND transactions.id=stuff.spend_tx_row_id;`
+ */
// SetAddressMatchingTxHashForOutpoint sets the matching tx hash (a spending
// transaction) for the addresses rows corresponding to the specified
@@ -356,7 +381,7 @@ const (
// AssignMatchingTxHashForOutpoint is like
// SetAddressMatchingTxHashForOutpoint except that it only updates rows
// where matching_tx_hash is not already set.
- AssignMatchingTxHashForOutpoint = SetAddressMatchingTxHashForOutpoint + ` AND matching_tx_hash='';`
+ // AssignMatchingTxHashForOutpoint = SetAddressMatchingTxHashForOutpoint + ` AND matching_tx_hash='';`
SetAddressMainchainForVoutIDs = `UPDATE addresses SET valid_mainchain=$1
WHERE is_funding = TRUE AND tx_vin_vout_row_id=$2
@@ -365,68 +390,6 @@ const (
SetAddressMainchainForVinIDs = `UPDATE addresses SET valid_mainchain=$1
WHERE is_funding = FALSE AND tx_vin_vout_row_id=$2
RETURNING address;`
-
- // Patches/upgrades
-
- // The SelectAddressesGloballyInvalid and UpdateAddressesGloballyInvalid
- // queries are used to patch a bug in new block handling that neglected to
- // set valid_mainchain=false for the previous block when the new block's
- // vote bits invalidate the previous block. This pertains to dcrpg 3.5.x.
-
- // SelectAddressesGloballyInvalid selects the row ids of the addresses table
- // corresponding to transactions that should have valid_mainchain set to
- // false according to the transactions table. Should is defined as any
- // occurrence of a given transaction (hash) being flagged as is_valid AND
- // is_mainchain.
- SelectAddressesGloballyInvalid = `SELECT id, valid_mainchain
- FROM addresses
- JOIN
- ( -- globally_invalid transactions with no (is_valid && is_mainchain)=true occurrence
- SELECT tx_hash
- FROM
- (
- SELECT bool_or(is_valid AND is_mainchain) AS any_valid, tx_hash
- FROM transactions
- GROUP BY tx_hash
- ) AS foo
- WHERE any_valid=FALSE
- ) AS globally_invalid
- ON globally_invalid.tx_hash = addresses.tx_hash `
-
- // UpdateAddressesGloballyInvalid sets valid_mainchain=false on address rows
- // identified by the SelectAddressesGloballyInvalid query (ids of
- // globally_invalid subquery table) as requiring this flag set, but which do
- // not already have it set (incorrectly_valid).
- UpdateAddressesGloballyInvalid = `UPDATE addresses SET valid_mainchain=false
- FROM (
- SELECT id FROM
- (
- ` + SelectAddressesGloballyInvalid + `
- ) AS invalid_ids
- WHERE invalid_ids.valid_mainchain=true
- ) AS incorrectly_valid
- WHERE incorrectly_valid.id=addresses.id;`
-
- // UpdateAddressesFundingMatchingHash sets matching_tx_hash as per the vins
- // table. This is needed to fix partially updated addresses table entries
- // that were affected by stake invalidation.
- UpdateAddressesFundingMatchingHash = `UPDATE addresses SET matching_tx_hash=vins.tx_hash -- , matching_tx_index=vins.tx_index
- FROM vins
- WHERE addresses.tx_hash=vins.prev_tx_hash
- AND addresses.tx_vin_vout_index=vins.prev_tx_index
- AND is_funding=TRUE
- AND is_valid=TRUE
- AND matching_tx_hash!=vins.tx_hash;`
- // AND (matching_tx_hash!=vins.tx_hash OR matching_tx_index!=vins.tx_index);`
-
- // UpdateValidMainchainFromTransactions sets valid_mainchain in all rows of
- // the addresses table according to the transactions table, unlike
- // UpdateAddressesGloballyInvalid that does it selectively for only the
- // incorrectly set addresses table rows. This is much slower.
- UpdateValidMainchainFromTransactions = `UPDATE addresses
- SET valid_mainchain = (tr.is_mainchain::int * tr.is_valid::int)::boolean
- FROM transactions AS tr
- WHERE addresses.tx_hash = tr.tx_hash;`
)
// MakeAddressRowInsertStatement returns the appropriate addresses insert statement for
@@ -458,10 +421,6 @@ func MakeSelectAddressAmountFlowByAddress(group string) string {
return formatGroupingQuery(selectAddressAmountFlowByAddress, group, "block_time")
}
-func MakeSelectAddressTimeGroupingCount(group string) string {
- return formatGroupingQuery(selectAddressTimeGroupingCount, group, "block_time")
-}
-
// Since date_trunc function doesn't have an option to group by "all" grouping,
// formatGroupingQuery removes the date_trunc from the sql query as its not applicable.
func formatGroupingQuery(mainQuery, group, column string) string {
diff --git a/db/dcrpg/internal/blockstmts.go b/db/dcrpg/internal/blockstmts.go
index b65fcaf7c..411905cb0 100644
--- a/db/dcrpg/internal/blockstmts.go
+++ b/db/dcrpg/internal/blockstmts.go
@@ -8,18 +8,18 @@ package internal
const (
CreateBlockTable = `CREATE TABLE IF NOT EXISTS blocks (
id SERIAL PRIMARY KEY,
- hash TEXT NOT NULL, -- UNIQUE
+ hash BYTEA NOT NULL, -- UNIQUE
height INT4,
size INT4,
is_valid BOOLEAN,
is_mainchain BOOLEAN,
version INT4,
- numtx INT4,
- num_rtx INT4,
- tx TEXT[],
+ numtx INT4, -- REDUNDANT if we keep tx and stx (or ids)
+ num_rtx INT4, -- REDUNDANT if we keep tx and stx (or ids)
+ -- tx BYTEA[], -- REMOVE and use a blocks_txs table?
txDbIDs INT8[],
- num_stx INT4,
- stx TEXT[],
+ num_stx INT4, -- REDUNDANT if we keep tx and stx (or ids)
+ -- stx BYTEA[], -- REMOVE and use a blocks_stxs table?
stxDbIDs INT8[],
time TIMESTAMPTZ,
nonce INT8,
@@ -32,9 +32,9 @@ const (
sbits INT8,
difficulty FLOAT8,
stake_version INT4,
- previous_hash TEXT,
- chainwork TEXT,
- winners TEXT[]
+ previous_hash BYTEA,
+ chainwork TEXT, -- todo: BYTEA
+ winners BYTEA[] -- remove? make a new stake table? to get TicketPoolInfo.Winners we'd need a join or second query
);`
// Block inserts. is_valid refers to blocks that have been validated by
@@ -45,15 +45,15 @@ const (
// insertBlockRow is the basis for several block insert/upsert statements.
insertBlockRow = `INSERT INTO blocks (
hash, height, size, is_valid, is_mainchain, version,
- numtx, num_rtx, tx, txDbIDs, num_stx, stx, stxDbIDs,
+ numtx, num_rtx, txDbIDs, num_stx, stxDbIDs,
time, nonce, vote_bits, voters,
fresh_stake, revocations, pool_size, bits, sbits,
difficulty, stake_version, previous_hash, chainwork, winners)
VALUES ($1, $2, $3, $4, $5, $6,
- $7, $8, $9, $10, $11, $12, $13,
- $14, $15, $16, $17, $18, $19,
- $20, $21, $22, $23, $24, $25,
- $26, $27) `
+ $7, $8, $9, $10, $11,
+ $12, $13, $14, $15,
+ $16, $17, $18, $19, $20,
+ $21, $22, $23, $24, $25) `
// InsertBlockRow inserts a new block row without checking for unique index
// conflicts. This should only be used before the unique indexes are created
@@ -70,6 +70,7 @@ const (
// either the inserted row or the existing row that causes the conflict. The
// complexity of this statement is necessary to avoid an unnecessary UPSERT,
// which would have performance consequences. The row is not locked.
+ /* unused
InsertBlockRowOnConflictDoNothing = `WITH ins AS (` +
insertBlockRow +
` ON CONFLICT (hash) DO NOTHING -- no lock on row
@@ -80,6 +81,7 @@ const (
SELECT id FROM blocks
WHERE hash = $1 -- only executed if no INSERT
LIMIT 1;`
+ */
// IndexBlockTableOnHash creates the unique index uix_block_hash on (hash).
IndexBlockTableOnHash = `CREATE UNIQUE INDEX ` + IndexOfBlocksTableOnHash + ` ON blocks(hash);`
@@ -105,8 +107,8 @@ const (
SelectBlockTimeByHeight = `SELECT time FROM blocks
WHERE height = $1 AND is_mainchain = true;`
- RetrieveBestBlockHeightAny = `SELECT id, hash, height FROM blocks
- ORDER BY height DESC LIMIT 1;`
+ // RetrieveBestBlockHeightAny = `SELECT id, hash, height FROM blocks
+ // ORDER BY height DESC LIMIT 1;`
RetrieveBestBlockHeight = `SELECT id, hash, height FROM blocks
WHERE is_mainchain = true ORDER BY height DESC LIMIT 1;`
@@ -117,11 +119,6 @@ const (
WHERE height > $1
ORDER BY height;`
- SelectGenesisTime = `SELECT time
- FROM blocks
- WHERE height = 0
- AND is_mainchain`
-
SelectWindowsByLimit = `SELECT (height/$1)*$1 AS window_start,
MAX(difficulty) AS difficulty,
SUM(num_rtx) AS txs,
@@ -153,9 +150,9 @@ const (
ORDER BY index_value DESC
LIMIT $2 OFFSET $3;`
- SelectBlocksPreviousHash = `SELECT previous_hash FROM blocks WHERE hash = $1;`
+ // SelectBlocksPreviousHash = `SELECT previous_hash FROM blocks WHERE hash = $1;`
- SelectBlocksHashes = `SELECT hash FROM blocks ORDER BY id;`
+ // SelectBlocksHashes = `SELECT hash FROM blocks ORDER BY id;`
SelectBlockVoteCount = `SELECT voters FROM blocks WHERE hash = $1;`
@@ -165,12 +162,6 @@ const (
WHERE is_mainchain = FALSE
ORDER BY height DESC;`
- SelectSideChainTips = `SELECT is_valid, height, previous_hash, hash
- FROM blocks
- JOIN block_chain ON this_hash=hash
- WHERE is_mainchain = FALSE AND block_chain.next_hash=''
- ORDER BY height DESC;`
-
SelectBlockStatus = `SELECT is_valid, is_mainchain, height, previous_hash, hash, block_chain.next_hash
FROM blocks
JOIN block_chain ON this_hash=hash
@@ -190,11 +181,11 @@ const (
WHERE is_valid = FALSE
ORDER BY height DESC;`
- SelectTxsPerDay = `SELECT date_trunc('day',time) AS date, sum(numtx)
- FROM blocks
- WHERE time > $1
- GROUP BY date
- ORDER BY date;`
+ // SelectTxsPerDay = `SELECT date_trunc('day',time) AS date, sum(numtx)
+ // FROM blocks
+ // WHERE time > $1
+ // GROUP BY date
+ // ORDER BY date;`
// blocks table updates
@@ -206,9 +197,9 @@ const (
// blocks table.
CreateBlockPrevNextTable = `CREATE TABLE IF NOT EXISTS block_chain (
block_db_id INT8 PRIMARY KEY,
- prev_hash TEXT NOT NULL,
- this_hash TEXT UNIQUE NOT NULL, -- UNIQUE
- next_hash TEXT
+ prev_hash BYTEA NOT NULL,
+ this_hash BYTEA UNIQUE NOT NULL,
+ next_hash BYTEA
);`
// InsertBlockPrevNext includes the primary key, which should be the row ID
@@ -273,7 +264,7 @@ const (
ORDER BY blocks.height DESC;`
SelectBlockDataByHash = `
- SELECT blocks.hash, blocks.height, blocks.size,
+ SELECT blocks.height, blocks.size,
blocks.difficulty, blocks.sbits, blocks.time, stats.pool_size,
stats.pool_val, blocks.winners, blocks.is_mainchain, blocks.is_valid
FROM blocks INNER JOIN stats ON blocks.id = stats.blocks_id
diff --git a/db/dcrpg/internal/meta.go b/db/dcrpg/internal/meta.go
index 8013d8b81..c31544896 100644
--- a/db/dcrpg/internal/meta.go
+++ b/db/dcrpg/internal/meta.go
@@ -9,18 +9,18 @@ const (
net_name TEXT,
currency_net INT8 PRIMARY KEY,
best_block_height INT8,
- best_block_hash TEXT,
+ best_block_hash BYTEA,
compatibility_version INT4,
schema_version INT4,
maintenance_version INT4,
ibd_complete BOOLEAN
);`
- InsertMetaRow = `INSERT INTO meta (
- net_name, currency_net, best_block_height, best_block_hash,
+ InitMetaRow = `INSERT INTO meta (
+ net_name, currency_net, best_block_height, -- best_block_hash,
compatibility_version, schema_version, maintenance_version,
ibd_complete)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8);`
+ VALUES ($1, $2, $3, $4, $5, $6, $7);`
SelectMetaDBVersions = `SELECT
compatibility_version,
diff --git a/db/dcrpg/internal/rewind.go b/db/dcrpg/internal/rewind.go
index 9648b62c8..be977677f 100644
--- a/db/dcrpg/internal/rewind.go
+++ b/db/dcrpg/internal/rewind.go
@@ -8,6 +8,7 @@ const (
// DeleteAddresses deletes rows of the addresses table (funding and
// spending) corresponding to all of the transactions (regular and stake)
// for a given block.
+ /* unused, but may be needed again since we dropped CockroachDB support
DeleteAddresses = `DELETE FROM addresses
USING transactions, blocks
WHERE (
@@ -15,8 +16,9 @@ const (
OR
(addresses.tx_vin_vout_row_id=ANY(transactions.vout_db_ids) AND addresses.is_funding=true)
)
- AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
AND blocks.hash=$1;`
+ */
// For CockroachDB, which does not allow the USING clause with DELETE, a
// subquery (addressesForBlockHash) is needed.
@@ -28,7 +30,7 @@ const (
// FROM transactions
// JOIN blocks ON
// blocks.hash=$1
- // AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ // AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
// JOIN addresses ON
// (addresses.tx_vin_vout_row_id=ANY(transactions.vin_db_ids) AND addresses.is_funding=false)
// OR (addresses.tx_vin_vout_row_id=ANY(transactions.vout_db_ids) AND addresses.is_funding=true)`
@@ -44,7 +46,7 @@ const (
SELECT transactions.id
FROM transactions
JOIN blocks ON blocks.hash = $1
- AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
)`
DeleteAddressesSubQry = `DELETE FROM addresses WHERE id IN (` + addressesForBlockHash + `);`
@@ -53,23 +55,25 @@ const (
USING transactions, blocks
WHERE addresses.tx_vin_vout_row_id=ANY(transactions.vin_db_ids)
AND NOT addresses.is_funding
- AND transactions.id = ANY(blocks.stxdbids)
+ AND transactions.id = ANY(blocks.stxDbIDs)
AND blocks.hash=$1;`
DeleteStakeAddressesSpending = `DELETE FROM addresses
USING transactions, blocks
WHERE addresses.tx_vin_vout_row_id=ANY(transactions.vout_db_ids)
AND addresses.is_funding
- AND transactions.id = ANY(blocks.stxdbids)
+ AND transactions.id = ANY(blocks.stxDbIDs)
AND blocks.hash=$1;`
// vin row deletion by block hash
+ /* unused, but may be needed again since we dropped CockroachDB support
DeleteVins = `DELETE FROM vins
USING transactions, blocks
WHERE vins.id=ANY(transactions.vin_db_ids)
- AND transactions.id = ANY(array_cat(blocks.txdbids,blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
AND blocks.hash=$1;`
+ */
// For CockroachDB, which does not allow the USING clause with DELETE, a
// subquery (vinsForBlockHash) is needed.
@@ -81,7 +85,7 @@ const (
// FROM transactions
// JOIN blocks ON
// blocks.hash=$1
- // AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ // AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
// JOIN vins ON
// vins.id=ANY(transactions.vin_db_ids)`
@@ -93,18 +97,19 @@ const (
SELECT transactions.id
FROM transactions
JOIN blocks ON blocks.hash = $1
- AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
)`
DeleteVinsSubQry = `DELETE FROM vins WHERE id IN (` + vinsForBlockHash + `);`
// DeleteStakeVins deletes rows of the vins table corresponding to inputs of
// the stake transactions (transactions.vin_db_ids) for a block
- // (blocks.stxdbids) specified by its hash (blocks.hash).
+ // (blocks.stxDbIDs) specified by its hash (blocks.hash).
+ // unused, but may be needed again since we dropped CockroachDB support
DeleteStakeVins = `DELETE FROM vins
USING transactions, blocks
WHERE vins.id=ANY(transactions.vin_db_ids)
- AND transactions.id = ANY(blocks.stxdbids)
+ AND transactions.id = ANY(blocks.stxDbIDs)
AND blocks.hash=$1;`
// DeleteStakeVinsSubSelect is like DeleteStakeVins except it is implemented
// using sub-queries rather than a join.
@@ -113,7 +118,7 @@ const (
SELECT UNNEST(vin_db_ids)
FROM transactions
WHERE id IN (
- SELECT UNNEST(stxdbids)
+ SELECT UNNEST(stxDbIDs)
FROM blocks
WHERE hash=$1
)
@@ -121,11 +126,12 @@ const (
// DeleteRegularVins deletes rows of the vins table corresponding to inputs
// of the regular/non-stake transactions (transactions.vin_db_ids) for a
- // block (blocks.txdbids) specified by its hash (blocks.hash).
+ // block (blocks.txDbIDs) specified by its hash (blocks.hash).
+ // unused, but may be needed again since we dropped CockroachDB support
DeleteRegularVins = `DELETE FROM vins
USING transactions, blocks
WHERE vins.id=ANY(transactions.vin_db_ids)
- AND transactions.id = ANY(blocks.txdbids)
+ AND transactions.id = ANY(blocks.txDbIDs)
AND blocks.hash=$1;`
// DeleteRegularVinsSubSelect is like DeleteRegularVins except it is
// implemented using sub-queries rather than a join.
@@ -134,7 +140,7 @@ const (
SELECT UNNEST(vin_db_ids)
FROM transactions
WHERE id IN (
- SELECT UNNEST(txdbids)
+ SELECT UNNEST(txDbIDs)
FROM blocks
WHERE hash=$1
)
@@ -142,17 +148,19 @@ const (
// vout row deletion by block hash
+ /* unused, but may be needed again since we dropped CockroachDB support
DeleteVouts = `DELETE FROM vouts
USING transactions, blocks
WHERE vouts.id=ANY(transactions.vout_db_ids)
- AND transactions.id = ANY(array_cat(blocks.txdbids,blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs,blocks.stxDbIDs))
AND blocks.hash=$1;`
+ */
voutsForBlockHash = `SELECT vouts.id
FROM transactions
JOIN blocks ON
blocks.hash=$1
- AND transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ AND transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
JOIN vouts ON
vouts.id=ANY(transactions.vout_db_ids)`
@@ -160,11 +168,12 @@ const (
// DeleteStakeVouts deletes rows of the vouts table corresponding to inputs
// of the stake transactions (transactions.vout_db_ids) for a block
- // (blocks.stxdbids) specified by its hash (blocks.hash).
+ // (blocks.stxDbIDs) specified by its hash (blocks.hash).
+ // unused, but may be needed again since we dropped CockroachDB support
DeleteStakeVouts = `DELETE FROM vouts
USING transactions, blocks
WHERE vouts.id=ANY(transactions.vout_db_ids)
- AND transactions.id = ANY(blocks.stxdbids)
+ AND transactions.id = ANY(blocks.stxDbIDs)
AND blocks.hash=$1;`
// DeleteStakeVoutsSubSelect is like DeleteStakeVouts except it is
// implemented using sub-queries rather than a join.
@@ -173,7 +182,7 @@ const (
SELECT UNNEST(vout_db_ids)
FROM transactions
WHERE id IN (
- SELECT UNNEST(stxdbids)
+ SELECT UNNEST(stxDbIDs)
FROM blocks
WHERE hash=$1
)
@@ -181,11 +190,12 @@ const (
// DeleteRegularVouts deletes rows of the vouts table corresponding to
// inputs of the regular/non-stake transactions (transactions.vout_db_ids)
- // for a block (blocks.txdbids) specified by its hash (blocks.hash).
+ // for a block (blocks.txDbIDs) specified by its hash (blocks.hash).
+ // unused, but may be needed again since we dropped CockroachDB support
DeleteRegularVouts = `DELETE FROM vouts
USING transactions, blocks
WHERE vouts.id=ANY(transactions.vout_db_ids)
- AND transactions.id = ANY(blocks.txdbids)
+ AND transactions.id = ANY(blocks.txDbIDs)
AND blocks.hash=$1;`
// DeleteRegularVoutsSubSelect is like DeleteRegularVouts except it is
// implemented using sub-queries rather than a join.
@@ -194,7 +204,7 @@ const (
SELECT UNNEST(vout_db_ids)
FROM transactions
WHERE id IN (
- SELECT UNNEST(txdbids)
+ SELECT UNNEST(txDbIDs)
FROM blocks
WHERE hash=$1
)
@@ -208,7 +218,7 @@ const (
DeleteTickets = `DELETE FROM tickets
USING blocks
- WHERE purchase_tx_db_id = ANY(blocks.stxdbids)
+ WHERE purchase_tx_db_id = ANY(blocks.stxDbIDs)
AND blocks.hash=$1;`
// DeleteTicketsSimple is simple, but slower because block_hash is second in
// a multi-column index, whereas both tickets.purchase_tx_db_id and
@@ -218,7 +228,7 @@ const (
DeleteTransactions = `DELETE FROM transactions
USING blocks
- WHERE transactions.id = ANY(array_cat(blocks.txdbids, blocks.stxdbids))
+ WHERE transactions.id = ANY(array_cat(blocks.txDbIDs, blocks.stxDbIDs))
AND blocks.hash=$1;`
DeleteTransactionsSimple = `DELETE FROM transactions
WHERE block_hash=$1
@@ -236,8 +246,4 @@ const (
DeleteBlockFromChain = `DELETE FROM block_chain
WHERE this_hash=$1
RETURNING prev_hash;`
-
- ClearBlockChainNextHash = `UPDATE block_chain
- SET next_hash=''
- WHERE next_hash=$1;`
)
diff --git a/db/dcrpg/internal/stakestmts.go b/db/dcrpg/internal/stakestmts.go
index fa7d00ebd..092397684 100644
--- a/db/dcrpg/internal/stakestmts.go
+++ b/db/dcrpg/internal/stakestmts.go
@@ -7,8 +7,8 @@ const (
CreateTicketsTable = `CREATE TABLE IF NOT EXISTS tickets (
id SERIAL PRIMARY KEY,
- tx_hash TEXT NOT NULL,
- block_hash TEXT NOT NULL,
+ tx_hash BYTEA NOT NULL,
+ block_hash BYTEA NOT NULL,
block_height INT4,
purchase_tx_db_id INT8,
stakesubmission_address TEXT,
@@ -86,9 +86,13 @@ const (
` ON tickets(pool_status);`
DeindexTicketsTableOnPoolStatus = `DROP INDEX IF EXISTS ` + IndexOfTicketsTableOnPoolStatus + ` CASCADE;`
- SelectTicketsInBlock = `SELECT * FROM tickets WHERE block_hash = $1;`
+ allCols = `id, tx_hash, block_hash, block_height, purchase_tx_db_id,
+ stakesubmission_address, is_multisig, is_split, num_inputs, price, fee, spend_type,
+ pool_status, is_mainchain, spend_height, spend_tx_db_id`
+
+ SelectTicketsInBlock = `SELECT ` + allCols + ` FROM tickets WHERE block_hash = $1;`
SelectTicketsTxDbIDsInBlock = `SELECT purchase_tx_db_id FROM tickets WHERE block_hash = $1;`
- SelectTicketsForAddress = `SELECT * FROM tickets WHERE stakesubmission_address = $1;`
+ SelectTicketsForAddress = `SELECT ` + allCols + ` FROM tickets WHERE stakesubmission_address = $1;`
forTxHashMainchainFirst = ` WHERE tx_hash = $1 ORDER BY is_mainchain DESC;`
SelectTicketIDHeightByHash = `SELECT id, block_height FROM tickets` + forTxHashMainchainFirst
@@ -99,8 +103,8 @@ const (
SelectUnspentTickets = `SELECT id, tx_hash FROM tickets
WHERE spend_type = 0 AND is_mainchain = true;`
- SelectTicketsForPriceAtLeast = `SELECT * FROM tickets WHERE price >= $1;`
- SelectTicketsForPriceAtMost = `SELECT * FROM tickets WHERE price <= $1;`
+ SelectTicketsForPriceAtLeast = `SELECT ` + allCols + ` FROM tickets WHERE price >= $1;`
+ SelectTicketsForPriceAtMost = `SELECT ` + allCols + ` FROM tickets WHERE price <= $1;`
SelectTicketsByPrice = `SELECT price,
SUM(CASE WHEN tickets.block_height >= $1 THEN 1 ELSE 0 END) as immature,
@@ -157,13 +161,13 @@ const (
CreateVotesTable = `CREATE TABLE IF NOT EXISTS votes (
id SERIAL PRIMARY KEY,
height INT4,
- tx_hash TEXT NOT NULL,
- block_hash TEXT NOT NULL,
- candidate_block_hash TEXT NOT NULL,
+ tx_hash BYTEA NOT NULL,
+ block_hash BYTEA NOT NULL,
+ candidate_block_hash BYTEA NOT NULL,
version INT4,
vote_bits INT2,
block_valid BOOLEAN,
- ticket_hash TEXT,
+ ticket_hash BYTEA,
ticket_tx_db_id INT8,
ticket_price FLOAT8,
vote_reward FLOAT8,
@@ -260,9 +264,9 @@ const (
CreateMissesTable = `CREATE TABLE IF NOT EXISTS misses (
id SERIAL PRIMARY KEY,
height INT4,
- block_hash TEXT NOT NULL,
- candidate_block_hash TEXT NOT NULL,
- ticket_hash TEXT NOT NULL
+ block_hash BYTEA NOT NULL,
+ candidate_block_hash BYTEA NOT NULL,
+ ticket_hash BYTEA NOT NULL
);`
// insertMissRow is the basis for several miss insert/upsert statements.
diff --git a/db/dcrpg/internal/stats.go b/db/dcrpg/internal/stats.go
index bf244dfb9..dfe54482f 100644
--- a/db/dcrpg/internal/stats.go
+++ b/db/dcrpg/internal/stats.go
@@ -51,11 +51,7 @@ const (
WHERE stats.height = $1
AND is_mainchain
;`
- SelectPoolInfoByHash = `
- SELECT stats.height, stats.pool_size, stats.pool_val, blocks.winners
- FROM stats JOIN blocks ON stats.blocks_id = blocks.id
- WHERE blocks.hash = $1
- ;`
+
SelectPoolInfoRange = `
SELECT stats.height, blocks.hash, stats.pool_size,
stats.pool_val, blocks.winners
diff --git a/db/dcrpg/internal/swap.go b/db/dcrpg/internal/swap.go
index 9591a9961..f457f0316 100644
--- a/db/dcrpg/internal/swap.go
+++ b/db/dcrpg/internal/swap.go
@@ -2,9 +2,9 @@ package internal
const (
CreateAtomicSwapTableV0 = `CREATE TABLE IF NOT EXISTS swaps (
- contract_tx TEXT,
+ contract_tx BYTEA,
contract_vout INT4,
- spend_tx TEXT,
+ spend_tx BYTEA,
spend_vin INT4,
spend_height INT8,
p2sh_addr TEXT,
diff --git a/db/dcrpg/internal/treasury.go b/db/dcrpg/internal/treasury.go
index 5f02636ed..3b75d611c 100644
--- a/db/dcrpg/internal/treasury.go
+++ b/db/dcrpg/internal/treasury.go
@@ -6,10 +6,10 @@ package internal
// These queries relate primarily to the "treasury" table.
const (
CreateTreasuryTable = `CREATE TABLE IF NOT EXISTS treasury (
- tx_hash TEXT,
+ tx_hash BYTEA,
tx_type INT4,
value INT8,
- block_hash TEXT,
+ block_hash BYTEA,
block_height INT8,
block_time TIMESTAMPTZ NOT NULL,
is_mainchain BOOLEAN
@@ -43,12 +43,16 @@ const (
InsertTreasuryRowOnConflictDoNothing = InsertTreasuryRow + `ON CONFLICT (tx_hash, block_hash)
DO NOTHING;`
- SelectTreasuryTxns = `SELECT * FROM treasury
+ SelectTreasuryTxns = `SELECT tx_hash, tx_type, value, block_hash,
+ block_height, block_time, is_mainchain
+ FROM treasury
WHERE is_mainchain
ORDER BY block_height DESC
LIMIT $1 OFFSET $2;`
- SelectTypedTreasuryTxns = `SELECT * FROM treasury
+ SelectTypedTreasuryTxns = `SELECT tx_hash, tx_type, value, block_hash,
+ block_height, block_time, is_mainchain
+ FROM treasury
WHERE is_mainchain
AND tx_type = $1
ORDER BY block_height DESC
diff --git a/db/dcrpg/internal/txstmts.go b/db/dcrpg/internal/txstmts.go
index 29ad09bd5..6ea5d0ba2 100644
--- a/db/dcrpg/internal/txstmts.go
+++ b/db/dcrpg/internal/txstmts.go
@@ -4,24 +4,17 @@
package internal
-import (
- "fmt"
-
- "github.com/decred/dcrd/blockchain/stake/v5"
-)
-
// These queries relate primarily to the "transactions" table.
const (
CreateTransactionTable = `CREATE TABLE IF NOT EXISTS transactions (
id SERIAL8 PRIMARY KEY,
- block_hash TEXT,
+ block_hash BYTEA, -- consider removing and using a blocks_txns table
block_height INT8,
block_time TIMESTAMPTZ,
- time TIMESTAMPTZ, -- TODO: REMOVE!
tx_type INT4,
version INT4,
tree INT2,
- tx_hash TEXT,
+ tx_hash BYTEA,
block_index INT4,
lock_time INT4,
expiry INT4,
@@ -41,19 +34,19 @@ const (
// insertTxRow is the basis for several tx insert/upsert statements.
insertTxRow = `INSERT INTO transactions (
- block_hash, block_height, block_time, time,
+ block_hash, block_height, block_time,
tx_type, version, tree, tx_hash, block_index,
lock_time, expiry, size, spent, sent, fees,
mix_count, mix_denom,
num_vin, vin_db_ids, num_vout, vout_db_ids,
is_valid, is_mainchain)
VALUES (
- $1, $2, $3, $4,
- $5, $6, $7, $8, $9,
- $10, $11, $12, $13, $14, $15,
- $16, $17,
- $18, $19, $20, $21,
- $22, $23) `
+ $1, $2, $3,
+ $4, $5, $6, $7, $8,
+ $9, $10, $11, $12, $13, $14,
+ $15, $16,
+ $17, $18, $19, $20,
+ $21, $22) `
// InsertTxRow inserts a new transaction row without checking for unique
// index conflicts. This should only be used before the unique indexes are
@@ -63,7 +56,7 @@ const (
// UpsertTxRow is an upsert (insert or update on conflict), returning the
// inserted/updated transaction row id.
UpsertTxRow = insertTxRow + `ON CONFLICT (tx_hash, block_hash) DO UPDATE
- SET is_valid = $22, is_mainchain = $23 RETURNING id;`
+ SET is_valid = $21, is_mainchain = $22 RETURNING id;`
// InsertTxRowOnConflictDoNothing allows an INSERT with a DO NOTHING on
// conflict with transactions' unique tx index, while returning the row id
@@ -78,7 +71,7 @@ const (
SELECT id FROM ins
UNION ALL
SELECT id FROM transactions
- WHERE tx_hash = $8 AND block_hash = $1 -- only executed if no INSERT
+ WHERE tx_hash = $7 AND block_hash = $1 -- only executed if no INSERT
LIMIT 1;`
// DeleteTxDuplicateRows removes rows that would violate the unique index
@@ -90,6 +83,7 @@ const (
FROM transactions) t
WHERE t.rnum > 1);`
+ /* unused
SelectTxDupIDs = `WITH dups AS (
SELECT array_agg(id) AS ids
FROM transactions
@@ -101,9 +95,7 @@ const (
FROM dups
ORDER BY dupids DESC
) AS _;`
-
- DeleteTxRows = `DELETE FROM transactions
- WHERE id = ANY($1);`
+ */
// IndexTransactionTableOnHashes creates the unique index uix_tx_hashes on
// (tx_hash, block_hash).
@@ -125,7 +117,7 @@ const (
FROM transactions
WHERE tx_hash = $1
ORDER BY is_mainchain DESC, is_valid DESC;`
- SelectTxsByBlockHash = `SELECT id, tx_hash, block_index, tree, block_time
+ SelectTxsByBlockHash = `SELECT tx_hash, block_index, tree, block_time
FROM transactions WHERE block_hash = $1;`
SelectTxBlockTimeByHash = `SELECT block_time
@@ -135,7 +127,7 @@ const (
LIMIT 1;`
SelectFullTxByHash = `SELECT id, block_hash, block_height, block_time,
- time, tx_type, version, tree, tx_hash, block_index, lock_time, expiry,
+ tx_type, version, tree, tx_hash, block_index, lock_time, expiry,
size, spent, sent, fees, mix_count, mix_denom, num_vin, vin_db_ids,
num_vout, vout_db_ids, is_valid, is_mainchain
FROM transactions WHERE tx_hash = $1
@@ -143,34 +135,15 @@ const (
LIMIT 1;`
SelectFullTxsByHash = `SELECT id, block_hash, block_height, block_time,
- time, tx_type, version, tree, tx_hash, block_index, lock_time, expiry,
+ tx_type, version, tree, tx_hash, block_index, lock_time, expiry,
size, spent, sent, fees, mix_count, mix_denom, num_vin, vin_db_ids,
num_vout, vout_db_ids, is_valid, is_mainchain
FROM transactions WHERE tx_hash = $1
ORDER BY is_mainchain DESC, is_valid DESC, block_time DESC;`
- SelectTxnsVinsByBlock = `SELECT vin_db_ids, is_valid, is_mainchain
- FROM transactions WHERE block_hash = $1;`
-
SelectTxnsVinsVoutsByBlock = `SELECT vin_db_ids, vout_db_ids, is_mainchain
FROM transactions WHERE block_hash = $1;`
- SelectTxsVinsAndVoutsIDs = `SELECT tx_type, vin_db_ids, vout_db_ids
- FROM transactions
- WHERE block_height BETWEEN $1 AND $2;`
-
- SelectTxsBlocksAboveHeight = `SELECT DISTINCT ON(block_height)
- block_height, block_hash
- FROM transactions
- WHERE block_height>$1
- AND is_mainchain;`
-
- SelectTxsBestBlock = `SELECT block_height, block_hash
- FROM transactions
- WHERE is_mainchain
- ORDER BY block_height DESC
- LIMIT 1;`
-
SelectRegularTxnsVinsVoutsByBlock = `SELECT vin_db_ids, vout_db_ids, is_mainchain
FROM transactions WHERE block_hash = $1 AND tree = 0;`
@@ -179,10 +152,6 @@ const (
WHERE tx_hash = $1
ORDER BY is_valid DESC, is_mainchain DESC, block_height DESC;`
- UpdateRegularTxnsValidMainchainByBlock = `UPDATE transactions
- SET is_valid=$1, is_mainchain=$2
- WHERE block_hash=$3 AND tree=0;`
-
UpdateRegularTxnsValidByBlock = `UPDATE transactions
SET is_valid=$1
WHERE block_hash=$2 AND tree=0;`
@@ -192,30 +161,6 @@ const (
WHERE block_hash=$2
RETURNING id;`
- UpdateTxnsValidMainchainAll = `UPDATE transactions
- SET is_valid=(b.is_valid::int + tree)::boolean, is_mainchain=b.is_mainchain
- FROM (
- SELECT hash, is_valid, is_mainchain
- FROM blocks
- ) b
- WHERE block_hash = b.hash ;`
-
- UpdateRegularTxnsValidAll = `UPDATE transactions
- SET is_valid=b.is_valid
- FROM (
- SELECT hash, is_valid
- FROM blocks
- ) b
- WHERE block_hash = b.hash AND tree = 0;`
-
- UpdateTxnsMainchainAll = `UPDATE transactions
- SET is_mainchain=b.is_mainchain
- FROM (
- SELECT hash, is_mainchain
- FROM blocks
- ) b
- WHERE block_hash = b.hash;`
-
SelectTicketsByType = `SELECT DISTINCT num_vout, COUNT(*)
FROM transactions
JOIN tickets ON transactions.id=purchase_tx_db_id
@@ -243,25 +188,6 @@ const (
// RetrieveVoutDbIDs = `SELECT unnest(vout_db_ids) FROM transactions WHERE id = $1;`
// RetrieveVoutDbID = `SELECT vout_db_ids[$2] FROM transactions WHERE id = $1;`
- SelectTicketsOutputCountByAllBlocks = `SELECT block_height,
- SUM(CASE WHEN num_vout = 3 THEN 1 ELSE 0 END) AS solo,
- SUM(CASE WHEN num_vout = 5 THEN 1 ELSE 0 END) AS pooled
- FROM transactions
- WHERE tx_type = $1
- AND block_height > $2
- GROUP BY block_height
- ORDER BY block_height;`
-
- SelectTicketsOutputCountByTPWindow = `SELECT
- floor(block_height/$3) AS count,
- SUM(CASE WHEN num_vout = 3 THEN 1 ELSE 0 END) AS solo,
- SUM(CASE WHEN num_vout = 5 THEN 1 ELSE 0 END) AS pooled
- FROM transactions
- WHERE tx_type = $1
- AND block_height > $2
- GROUP BY count
- ORDER BY count;`
-
SelectFeesPerBlockAboveHeight = `
SELECT block_height, SUM(fees) AS fees
FROM transactions
@@ -290,12 +216,14 @@ const (
ORDER BY fund_tx.block_height;`
)
+/*
var (
SelectAllRevokes = fmt.Sprintf(`SELECT id, tx_hash, block_height, vin_db_ids[0]
FROM transactions
WHERE tx_type = %d;`,
stake.TxTypeSSRtx)
)
+*/
// MakeTxInsertStatement returns the appropriate transaction insert statement
// for the desired conflict checking and handling behavior. For checked=false,
diff --git a/db/dcrpg/internal/vinoutstmts.go b/db/dcrpg/internal/vinoutstmts.go
index 3ebfc78f8..e807bf090 100644
--- a/db/dcrpg/internal/vinoutstmts.go
+++ b/db/dcrpg/internal/vinoutstmts.go
@@ -11,14 +11,14 @@ const (
CreateVinTable = `CREATE TABLE IF NOT EXISTS vins (
id SERIAL8 PRIMARY KEY,
- tx_hash TEXT,
+ tx_hash BYTEA, -- maybe a transactions_vins id-id table instead?
tx_index INT4,
tx_tree INT2,
- is_valid BOOLEAN,
+ is_valid BOOLEAN, -- dup in transactions table...
is_mainchain BOOLEAN,
block_time TIMESTAMPTZ,
- prev_tx_hash TEXT,
- prev_tx_index INT8,
+ prev_tx_hash BYTEA,
+ prev_tx_index INT8, -- int8???
prev_tx_tree INT2,
value_in INT8,
tx_type INT4
@@ -65,16 +65,11 @@ const (
FROM vins) t
WHERE t.rnum > 1);`
- ShowCreateVinsTable = `WITH a AS (SHOW CREATE vins) SELECT create_statement FROM a;`
- DistinctVinsToTempTable = `INSERT INTO vins_temp
- SELECT DISTINCT ON (tx_hash, tx_index) *
- FROM vins;`
- RenameVinsTemp = `ALTER TABLE vins_temp RENAME TO vins;`
-
+ /* unused
SelectVinDupIDs = `WITH dups AS (
SELECT array_agg(id) AS ids
FROM vins
- GROUP BY tx_hash, tx_index
+ GROUP BY tx_hash, tx_index
HAVING count(id)>1
)
SELECT array_agg(dupids) FROM (
@@ -82,9 +77,7 @@ const (
FROM dups
ORDER BY dupids DESC
) AS _;`
-
- DeleteVinRows = `DELETE FROM vins
- WHERE id = ANY($1);`
+ */
IndexVinTableOnVins = `CREATE UNIQUE INDEX ` + IndexOfVinsTableOnVin +
` ON vins(tx_hash, tx_index, tx_tree);`
@@ -94,9 +87,6 @@ const (
` ON vins(prev_tx_hash, prev_tx_index);`
DeindexVinTableOnPrevOuts = `DROP INDEX ` + IndexOfVinsTableOnPrevOut + ` CASCADE;`
- SelectVinIDsALL = `SELECT id FROM vins;`
- CountVinsRows = `SELECT reltuples::BIGINT AS estimate FROM pg_class WHERE relname='vins';`
-
SelectSpendingTxsByPrevTx = `SELECT id, tx_hash, tx_index, prev_tx_index FROM vins WHERE prev_tx_hash=$1;`
SelectSpendingTxsByPrevTxWithBlockHeight = `SELECT prev_tx_index, vins.tx_hash, vins.tx_index, block_height
FROM vins LEFT JOIN transactions ON
@@ -104,21 +94,14 @@ const (
transactions.is_valid AND
transactions.is_mainchain
WHERE prev_tx_hash=$1 AND vins.is_valid AND vins.is_mainchain;`
- SelectSpendingTxByPrevOut = `SELECT id, tx_hash, tx_index, tx_tree FROM vins
+ SelectSpendingTxByPrevOut = `SELECT id, tx_hash, tx_index FROM vins
WHERE prev_tx_hash=$1 AND prev_tx_index=$2 ORDER BY is_valid DESC, is_mainchain DESC, block_time DESC;`
- SelectFundingTxsByTx = `SELECT id, prev_tx_hash FROM vins WHERE tx_hash=$1;`
- SelectFundingTxByTxIn = `SELECT id, prev_tx_hash FROM vins WHERE tx_hash=$1 AND tx_index=$2;`
- SelectFundingOutpointByTxIn = `SELECT id, prev_tx_hash, prev_tx_index, prev_tx_tree FROM vins
- WHERE tx_hash=$1 AND tx_index=$2;`
- SelectFundingOutpointByVinID = `SELECT prev_tx_hash, prev_tx_index, prev_tx_tree FROM vins WHERE id=$1;`
SelectFundingOutpointIndxByVinID = `SELECT prev_tx_index FROM vins WHERE id=$1;`
- SelectFundingTxByVinID = `SELECT prev_tx_hash FROM vins WHERE id=$1;`
- SelectSpendingTxByVinID = `SELECT tx_hash, tx_index, tx_tree FROM vins WHERE id=$1;`
- SelectAllVinInfoByID = `SELECT tx_hash, tx_index, tx_tree, is_valid, is_mainchain, block_time,
+ SelectAllVinInfoByID = `SELECT tx_hash, tx_index, tx_tree, is_valid, is_mainchain, block_time, --- could easily do this by tx_hash and tx_index
prev_tx_hash, prev_tx_index, prev_tx_tree, value_in, tx_type FROM vins WHERE id = $1;`
- SelectVinVoutPairByID = `SELECT tx_hash, tx_index, prev_tx_hash, prev_tx_index FROM vins WHERE id = $1;`
+ /* alt without spend_tx_row_id
SelectUTXOsViaVinsMatch = `SELECT vouts.id, vouts.tx_hash, vouts.tx_index, -- row ID and outpoint
vouts.script_addresses, vouts.value, vouts.mixed -- value, addresses, and mixed flag of output
FROM vouts
@@ -129,6 +112,7 @@ const (
WHERE vins.prev_tx_hash IS NULL -- unspent, condition applied after join, which will put NULL when no vin matches the vout
AND array_length(script_addresses, 1)>0
AND transactions.is_mainchain AND transactions.is_valid;`
+ */
SelectUTXOs = `SELECT vouts.id, vouts.tx_hash, vouts.tx_index, vouts.script_addresses, vouts.value, vouts.mixed
FROM vouts
@@ -138,27 +122,15 @@ const (
SetIsValidIsMainchainByTxHash = `UPDATE vins SET is_valid = $1, is_mainchain = $2
WHERE tx_hash = $3 AND block_time = $4;`
- SetIsValidIsMainchainByVinID = `UPDATE vins SET is_valid = $2, is_mainchain = $3
- WHERE id = $1;`
- SetIsValidByTxHash = `UPDATE vins SET is_valid = $1
- WHERE tx_hash = $2 AND block_time = $3;`
- SetIsValidByVinID = `UPDATE vins SET is_valid = $2
- WHERE id = $1;`
- SetIsMainchainByTxHash = `UPDATE vins SET is_mainchain = $1
- WHERE tx_hash = $2 AND block_time = $3;`
SetIsMainchainByVinID = `UPDATE vins SET is_mainchain = $2
WHERE id = $1;`
- // SetVinsTableCoinSupplyUpgrade does not set is_mainchain because that upgrade comes after this one
- SetVinsTableCoinSupplyUpgrade = `UPDATE vins SET is_valid = $1, block_time = $3, value_in = $4
- WHERE tx_hash = $5 AND tx_index = $6 AND tx_tree = $7;`
-
// SelectCoinSupply fetches the newly minted atoms per block by filtering
// for stakebase, treasurybase, and stake-validated coinbase transactions.
SelectCoinSupply = `SELECT vins.block_time, sum(vins.value_in)
FROM vins JOIN transactions
ON vins.tx_hash = transactions.tx_hash
- WHERE vins.prev_tx_hash = '0000000000000000000000000000000000000000000000000000000000000000'
+ WHERE vins.prev_tx_hash = '\x0000000000000000000000000000000000000000000000000000000000000000'::bytea
AND transactions.block_height > $1
AND vins.is_mainchain AND (vins.is_valid OR vins.tx_tree != 0)
AND vins.tx_type = ANY(ARRAY[0,2,6]) --- coinbase(regular),ssgen,treasurybase, but NOT tspend, same as =ANY('{0,2,6}') or IN(0,2,6)
@@ -169,23 +141,22 @@ const (
CreateVoutTable = `CREATE TABLE IF NOT EXISTS vouts (
id SERIAL8 PRIMARY KEY,
- tx_hash TEXT,
+ tx_hash BYTEA, -- maybe a transactions_vouts id-id table instead
tx_index INT4,
tx_tree INT2,
value INT8,
version INT2,
- pkscript BYTEA,
- script_req_sigs INT4,
+ -- pkscript BYTEA, -- ask the node
script_type TEXT,
- script_addresses TEXT[],
+ script_addresses TEXT, -- but the addresses table... (!)
mixed BOOLEAN DEFAULT FALSE,
spend_tx_row_id INT8
);`
// insertVinRow is the basis for several vout insert/upsert statements.
insertVoutRow = `INSERT INTO vouts (tx_hash, tx_index, tx_tree, value,
- version, pkscript, script_req_sigs, script_type, script_addresses, mixed)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ` // not with spend_tx_row_id
+ version, script_type, script_addresses, mixed)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ` // not with spend_tx_row_id
// InsertVoutRow inserts a new vout row without checking for unique index
// conflicts. This should only be used before the unique indexes are created
@@ -234,26 +205,19 @@ const (
WHERE t.rnum > 1
);`
- ShowCreateVoutsTable = `WITH a AS (SHOW CREATE vouts) SELECT create_statement FROM a;`
- DistinctVoutsToTempTable = `INSERT INTO vouts_temp
- SELECT DISTINCT ON (tx_hash, tx_index) *
- FROM vouts;`
- RenameVoutsTemp = `ALTER TABLE vouts_temp RENAME TO vouts;`
-
- SelectVoutDupIDs = `WITH dups AS (
- SELECT array_agg(id) AS ids
- FROM vouts
- GROUP BY tx_hash, tx_index
- HAVING count(id)>1
- )
- SELECT array_agg(dupids) FROM (
- SELECT unnest(ids) AS dupids
- FROM dups
- ORDER BY dupids DESC
- ) AS _;`
-
- DeleteVoutRows = `DELETE FROM vins
- WHERE id = ANY($1);`
+ /*
+ SelectVoutDupIDs = `WITH dups AS (
+ SELECT array_agg(id) AS ids
+ FROM vouts
+ GROUP BY tx_hash, tx_index
+ HAVING count(id)>1
+ )
+ SELECT array_agg(dupids) FROM (
+ SELECT unnest(ids) AS dupids
+ FROM dups
+ ORDER BY dupids DESC
+ ) AS _;`
+ */
// IndexVoutTableOnTxHashIdx creates the unique index uix_vout_txhash_ind on
// (tx_hash, tx_index, tx_tree).
@@ -268,15 +232,11 @@ const (
SelectVoutAddressesByTxOut = `SELECT id, script_addresses, value, mixed FROM vouts
WHERE tx_hash = $1 AND tx_index = $2 AND tx_tree = $3;`
- SelectPkScriptByID = `SELECT version, pkscript FROM vouts WHERE id=$1;`
- SelectPkScriptByOutpoint = `SELECT version, pkscript FROM vouts WHERE tx_hash=$1 and tx_index=$2;`
- SelectPkScriptByVinID = `SELECT version, pkscript FROM vouts
- JOIN vins ON vouts.tx_hash=vins.prev_tx_hash and vouts.tx_index=vins.prev_tx_index
- WHERE vins.id=$1;`
-
- SelectVoutIDByOutpoint = `SELECT id FROM vouts WHERE tx_hash=$1 and tx_index=$2;`
- SelectVoutByID = `SELECT * FROM vouts WHERE id=$1;`
+	SelectVoutByID = `SELECT id, tx_hash, tx_index, tx_tree, value, version,
+		script_type, script_addresses, mixed, spend_tx_row_id
+		FROM vouts WHERE id=$1;`
+ // TEST ONLY REMOVE
RetrieveVoutValue = `SELECT value FROM vouts WHERE tx_hash=$1 and tx_index=$2;`
RetrieveVoutValues = `SELECT value, tx_index, tx_tree FROM vouts WHERE tx_hash=$1;`
)
diff --git a/db/dcrpg/pgblockchain.go b/db/dcrpg/pgblockchain.go
index 6ff5254ec..6c2c27dab 100644
--- a/db/dcrpg/pgblockchain.go
+++ b/db/dcrpg/pgblockchain.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018-2022, The Decred developers
+// Copyright (c) 2018-2023, The Decred developers
// Copyright (c) 2017, The dcrdata developers
// See LICENSE for details.
@@ -11,7 +11,6 @@ import (
"encoding/hex"
"errors"
"fmt"
- "math"
"sort"
"strings"
"sync"
@@ -30,7 +29,6 @@ import (
"github.com/decred/dcrd/txscript/v4/stdscript"
"github.com/decred/dcrd/wire"
humanize "github.com/dustin/go-humanize"
- "github.com/lib/pq"
"github.com/decred/dcrdata/db/dcrpg/v8/internal"
apitypes "github.com/decred/dcrdata/v8/api/types"
@@ -130,21 +128,21 @@ func UpdateTicketPoolData(interval dbtypes.TimeBasedGrouping, timeGraph *dbtypes
// utxoStore provides a UTXOData cache with thread-safe get/set methods.
type utxoStore struct {
- sync.Mutex
- c map[string]map[uint32]*dbtypes.UTXOData
+ sync.RWMutex
+ c map[dbtypes.ChainHash]map[uint32]*dbtypes.UTXOData
}
// newUtxoStore constructs a new utxoStore.
func newUtxoStore(prealloc int) utxoStore {
return utxoStore{
- c: make(map[string]map[uint32]*dbtypes.UTXOData, prealloc),
+ c: make(map[dbtypes.ChainHash]map[uint32]*dbtypes.UTXOData, prealloc),
}
}
// Get attempts to locate UTXOData for the specified outpoint. If the data is
// not in the cache, a nil pointer and false are returned. If the data is
// located, the data and true are returned, and the data is evicted from cache.
-func (u *utxoStore) Get(txHash string, txIndex uint32) (*dbtypes.UTXOData, bool) {
+func (u *utxoStore) Get(txHash dbtypes.ChainHash, txIndex uint32) (*dbtypes.UTXOData, bool) {
u.Lock()
defer u.Unlock()
utxoData, ok := u.c[txHash][txIndex]
@@ -158,9 +156,9 @@ func (u *utxoStore) Get(txHash string, txIndex uint32) (*dbtypes.UTXOData, bool)
return utxoData, ok
}
-func (u *utxoStore) Peek(txHash string, txIndex uint32) *dbtypes.UTXOData {
- u.Lock()
- defer u.Unlock()
+func (u *utxoStore) Peek(txHash dbtypes.ChainHash, txIndex uint32) *dbtypes.UTXOData {
+ u.RLock()
+ defer u.RUnlock()
txVals, ok := u.c[txHash]
if !ok {
return nil
@@ -168,7 +166,7 @@ func (u *utxoStore) Peek(txHash string, txIndex uint32) *dbtypes.UTXOData {
return txVals[txIndex]
}
-func (u *utxoStore) set(txHash string, txIndex uint32, voutDbID int64, addrs []string, val int64, mixed bool) {
+func (u *utxoStore) set(txHash dbtypes.ChainHash, txIndex uint32, voutDbID int64, addrs []string, val int64, mixed bool) {
txUTXOVals, ok := u.c[txHash]
if !ok {
u.c[txHash] = map[uint32]*dbtypes.UTXOData{
@@ -191,7 +189,7 @@ func (u *utxoStore) set(txHash string, txIndex uint32, voutDbID int64, addrs []s
// Set stores the addresses and amount in a UTXOData entry in the cache for the
// given outpoint.
-func (u *utxoStore) Set(txHash string, txIndex uint32, voutDbID int64, addrs []string, val int64, mixed bool) {
+func (u *utxoStore) Set(txHash dbtypes.ChainHash, txIndex uint32, voutDbID int64, addrs []string, val int64, mixed bool) {
u.Lock()
defer u.Unlock()
u.set(txHash, txIndex, voutDbID, addrs, val, mixed)
@@ -207,7 +205,7 @@ func (u *utxoStore) Reinit(utxos []dbtypes.UTXO) {
// Pre-allocate the transaction hash map assuming the number of unique
// transaction hashes in input is roughly 2/3 of the number of UTXOs.
prealloc := 2 * len(utxos) / 3
- u.c = make(map[string]map[uint32]*dbtypes.UTXOData, prealloc)
+ u.c = make(map[dbtypes.ChainHash]map[uint32]*dbtypes.UTXOData, prealloc)
for i := range utxos {
u.set(utxos[i].TxHash, utxos[i].TxIndex, utxos[i].VoutDbID, utxos[i].Addresses, utxos[i].Value, utxos[i].Mixed)
}
@@ -215,8 +213,8 @@ func (u *utxoStore) Reinit(utxos []dbtypes.UTXO) {
// Size returns the size of the utxo cache in number of UTXOs.
func (u *utxoStore) Size() (sz int) {
- u.Lock()
- defer u.Unlock()
+ u.RLock()
+ defer u.RUnlock()
for _, m := range u.c {
sz += len(m)
}
@@ -296,7 +294,7 @@ type ChainDeployments struct {
type BestBlock struct {
mtx sync.RWMutex
height int64
- hash string
+ hash dbtypes.ChainHash
}
func (pgb *ChainDB) timeoutError() string {
@@ -363,7 +361,11 @@ func (pgb *ChainDB) MissingSideChainBlocks() ([]dbtypes.SideChain, int, error) {
sideHeightDB, err := pgb.BlockHeight(sideChain[is])
if errors.Is(err, dbtypes.ErrNoResult) {
// This block is NOT already in the DB.
- blocksToStore[it].Hashes = append(blocksToStore[it].Hashes, sideChain[is])
+ ch, err := chainhash.NewHashFromStr(sideChain[is])
+ if err != nil {
+ return nil, 0, err
+ }
+ blocksToStore[it].Hashes = append(blocksToStore[it].Hashes, *ch)
blocksToStore[it].Heights = append(blocksToStore[it].Heights, sideHeight)
nSideChainBlocks++
} else if err == nil {
@@ -390,7 +392,7 @@ func (pgb *ChainDB) MissingSideChainBlocks() ([]dbtypes.SideChain, int, error) {
// TicketTxnIDGetter provides a cache for DB row IDs of tickets.
type TicketTxnIDGetter struct {
mtx sync.RWMutex
- idCache map[string]uint64
+ idCache map[dbtypes.ChainHash]uint64
db *sql.DB
}
@@ -398,7 +400,7 @@ type TicketTxnIDGetter struct {
// hash. A cache is checked first. In the event of a cache hit, the DB ID is
// returned and deleted from the internal cache. In the event of a cache miss,
// the database is queried. If the database query fails, the error is non-nil.
-func (t *TicketTxnIDGetter) TxnDbID(txid string, expire bool) (uint64, error) {
+func (t *TicketTxnIDGetter) TxnDbID(txid dbtypes.ChainHash, expire bool) (uint64, error) {
if t == nil {
panic("You're using an uninitialized TicketTxnIDGetter")
}
@@ -415,11 +417,11 @@ func (t *TicketTxnIDGetter) TxnDbID(txid string, expire bool) (uint64, error) {
}
// Cache miss. Get the row id by hash from the tickets table.
log.Tracef("Cache miss for %s.", txid)
- return RetrieveTicketIDByHashNoCancel(t.db, txid)
+ return retrieveTicketIDByHashNoCancel(t.db, txid)
}
// Set stores the (transaction hash, DB row ID) pair a map for future access.
-func (t *TicketTxnIDGetter) Set(txid string, txDbID uint64) {
+func (t *TicketTxnIDGetter) Set(txid dbtypes.ChainHash, txDbID uint64) {
if t == nil {
return
}
@@ -429,7 +431,7 @@ func (t *TicketTxnIDGetter) Set(txid string, txDbID uint64) {
}
// SetN stores several (transaction hash, DB row ID) pairs in the map.
-func (t *TicketTxnIDGetter) SetN(txid []string, txDbID []uint64) {
+func (t *TicketTxnIDGetter) SetN(txid []dbtypes.ChainHash, txDbID []uint64) {
if t == nil {
return
}
@@ -444,7 +446,7 @@ func (t *TicketTxnIDGetter) SetN(txid []string, txDbID []uint64) {
func NewTicketTxnIDGetter(db *sql.DB) *TicketTxnIDGetter {
return &TicketTxnIDGetter{
db: db,
- idCache: make(map[string]uint64),
+ idCache: make(map[dbtypes.ChainHash]uint64),
}
}
@@ -492,7 +494,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
}
}
- pgVersion, pgVerNum, err := RetrievePGVersion(db)
+ pgVersion, pgVerNum, err := retrievePGVersion(db)
if err != nil {
return nil, err
}
@@ -504,13 +506,13 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
// Optionally logs the PostgreSQL configuration.
if !cfg.HidePGConfig {
- perfSettings, err := RetrieveSysSettingsPerformance(db)
+ perfSettings, err := retrieveSysSettingsPerformance(db)
if err != nil {
return nil, err
}
log.Infof("postgres configuration settings:\n%v", perfSettings)
- servSettings, err := RetrieveSysSettingsServer(db)
+ servSettings, err := retrieveSysSettingsServer(db)
if err != nil {
return nil, err
}
@@ -518,7 +520,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
}
// Check the synchronous_commit setting.
- syncCommit, err := RetrieveSysSettingSyncCommit(db)
+ syncCommit, err := retrieveSysSettingSyncCommit(db)
if err != nil {
return nil, err
}
@@ -530,7 +532,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
return nil, fmt.Errorf("failed to set synchronous_commit: %w", err)
}
// Verify that the setting was changed.
- if syncCommit, err = RetrieveSysSettingSyncCommit(db); err != nil {
+ if syncCommit, err = retrieveSysSettingSyncCommit(db); err != nil {
return nil, err
}
if syncCommit != "off" {
@@ -570,14 +572,14 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
if err = CreateTables(db); err != nil {
return nil, fmt.Errorf("failed to create tables: %w", err)
}
- err = insertMetaData(db, &metaData{
+ err = initMetaData(db, &metaData{
netName: params.Name,
currencyNet: uint32(params.Net),
bestBlockHeight: -1,
dbVer: *targetDatabaseVersion,
})
if err != nil {
- return nil, fmt.Errorf("insertMetaData failed: %w", err)
+ return nil, fmt.Errorf("initMetaData failed: %w", err)
}
case metaNotFoundErr:
log.Errorf("Legacy DB versioning found. No upgrade supported. Wipe all data and start fresh.")
@@ -586,35 +588,34 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
}
// Get the best block height from the blocks table.
- bestHeight, bestHash, err := RetrieveBestBlock(ctx, db)
+ bestHeight, bestHash, err := retrieveBestBlock(ctx, db)
if err != nil {
- return nil, fmt.Errorf("RetrieveBestBlock: %w", err)
+ return nil, fmt.Errorf("retrieveBestBlock: %w", err)
}
// NOTE: Once legacy versioned tables are no longer in use, use the height
- // and hash from DBBestBlock instead.
+ // and hash from dbBestBlock instead.
// Verify that the best
// block in the meta table is the same as in the blocks table. If the blocks
// table is ahead of the meta table, it is likely that the data for the best
// block was not fully inserted into all tables. Purge data back to the meta
// table's best block height. Also purge if the hashes do not match.
- dbHash, dbHeightInit, err := DBBestBlock(ctx, db)
+ dbHash, dbHeightInit, err := dbBestBlock(ctx, db)
if err != nil {
- return nil, fmt.Errorf("DBBestBlock: %w", err)
+ return nil, fmt.Errorf("dbBestBlock: %w", err)
}
// Best block height in the transactions table (written to even before
// the blocks table).
- bestTxsBlockHeight, bestTxsBlockHash, err :=
- RetrieveTxsBestBlockMainchain(ctx, db)
- if err != nil {
- return nil, err
- }
-
- if bestTxsBlockHeight > bestHeight {
- bestHeight = bestTxsBlockHeight
- bestHash = bestTxsBlockHash
- }
+ // bestTxsBlockHeight, bestTxsBlockHash, err :=
+ // retrieveTxsBestBlockMainchain(ctx, db)
+ // if err != nil {
+ // return nil, err
+ // }
+ // if bestTxsBlockHeight > bestHeight {
+ // bestHeight = bestTxsBlockHeight
+ // bestHash = bestTxsBlockHash
+ // }
// The meta table's best block height should never end up larger than
// the blocks table's best block height, but purge a block anyway since
@@ -624,13 +625,13 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
log.Warnf("Best block height in meta table (%d) "+
"greater than best height in blocks table (%d)!",
dbHeightInit, bestHeight)
- _, bestHeight, bestHash, err = DeleteBestBlock(ctx, db)
+ _, bestHeight, bestHash, err = deleteBestBlock(ctx, db)
if err != nil {
return nil, fmt.Errorf("DeleteBestBlock: %w", err)
}
- dbHash, dbHeightInit, err = DBBestBlock(ctx, db)
+ dbHash, dbHeightInit, err = dbBestBlock(ctx, db)
if err != nil {
- return nil, fmt.Errorf("DBBestBlock: %w", err)
+ return nil, fmt.Errorf("dbBestBlock: %w", err)
}
}
@@ -643,7 +644,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
// Delete the best block across all tables, updating the best block
// in the meta table.
- _, bestHeight, bestHash, err = DeleteBestBlock(ctx, db)
+ _, bestHeight, bestHash, err = deleteBestBlock(ctx, db)
if err != nil {
return nil, fmt.Errorf("DeleteBestBlock: %w", err)
}
@@ -653,9 +654,9 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
// Now dbHash must equal bestHash. If not, DeleteBestBlock failed to
// update the meta table.
- dbHash, _, err = DBBestBlock(ctx, db)
+ dbHash, _, err = dbBestBlock(ctx, db)
if err != nil {
- return nil, fmt.Errorf("DBBestBlock: %w", err)
+ return nil, fmt.Errorf("dbBestBlock: %w", err)
}
if dbHash != bestHash {
return nil, fmt.Errorf("best block hash in meta and blocks tables do not match: "+
@@ -671,7 +672,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
log.Infof("Pre-loading unspent ticket info for InsertVote optimization.")
unspentTicketCache := NewTicketTxnIDGetter(db)
- unspentTicketDbIDs, unspentTicketHashes, err := RetrieveUnspentTickets(ctx, db)
+ unspentTicketDbIDs, unspentTicketHashes, err := retrieveUnspentTickets(ctx, db)
if err != nil && !errors.Is(err, sql.ErrNoRows) && !strings.HasSuffix(err.Error(), "does not exist") {
return nil, err
}
@@ -735,7 +736,7 @@ func NewChainDB(ctx context.Context, cfg *ChainDBCfg, stakeDB *stakedb.StakeData
// Update the current chain state in the ChainDB
if client != nil {
- bci, err := chainDB.BlockchainInfo()
+ bci, err := chainDB.Client.GetBlockChainInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to fetch the latest blockchain info")
}
@@ -840,31 +841,42 @@ func (pgb *ChainDB) DropTables() {
func (pgb *ChainDB) SideChainBlocks() ([]*dbtypes.BlockStatus, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- scb, err := RetrieveSideChainBlocks(ctx, pgb.db)
+ scb, err := retrieveSideChainBlocks(ctx, pgb.db)
return scb, pgb.replaceCancelError(err)
}
// SideChainTips retrieves the tip/head block for all known side chains.
+/*
func (pgb *ChainDB) SideChainTips() ([]*dbtypes.BlockStatus, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- sct, err := RetrieveSideChainTips(ctx, pgb.db)
+ sct, err := retrieveSideChainTips(ctx, pgb.db)
return sct, pgb.replaceCancelError(err)
}
+*/
// DisapprovedBlocks retrieves all blocks disapproved by stakeholder votes.
func (pgb *ChainDB) DisapprovedBlocks() ([]*dbtypes.BlockStatus, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- disb, err := RetrieveDisapprovedBlocks(ctx, pgb.db)
+ disb, err := retrieveDisapprovedBlocks(ctx, pgb.db)
return disb, pgb.replaceCancelError(err)
}
+func chainHashFromStr(hash string) (ch dbtypes.ChainHash, err error) {
+ err = chainhash.Decode((*chainhash.Hash)(&ch), hash)
+ return
+}
+
// BlockStatus retrieves the block chain status of the specified block.
func (pgb *ChainDB) BlockStatus(hash string) (dbtypes.BlockStatus, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- bs, err := RetrieveBlockStatus(ctx, pgb.db, hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return dbtypes.BlockStatus{}, err
+ }
+ bs, err := retrieveBlockStatus(ctx, pgb.db, ch)
return bs, pgb.replaceCancelError(err)
}
@@ -873,13 +885,17 @@ func (pgb *ChainDB) BlockStatus(hash string) (dbtypes.BlockStatus, error) {
func (pgb *ChainDB) BlockStatuses(height int64) ([]*dbtypes.BlockStatus, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- blocks, err := RetrieveBlockStatuses(ctx, pgb.db, height)
+ blocks, err := retrieveBlockStatuses(ctx, pgb.db, height)
return blocks, pgb.replaceCancelError(err)
}
// blockFlags retrieves the block's isValid and isMainchain flags.
func (pgb *ChainDB) blockFlags(ctx context.Context, hash string) (bool, bool, error) {
- iv, im, err := RetrieveBlockFlags(ctx, pgb.db, hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return false, false, err
+ }
+ iv, im, err := retrieveBlockFlags(ctx, pgb.db, ch)
return iv, im, pgb.replaceCancelError(err)
}
@@ -897,8 +913,8 @@ func (pgb *ChainDB) BlockFlagsNoCancel(hash string) (bool, bool, error) {
// blockChainDbID gets the row ID of the given block hash in the block_chain
// table. The cancellation context is used without timeout.
-func (pgb *ChainDB) blockChainDbID(ctx context.Context, hash string) (dbID uint64, err error) {
- err = pgb.db.QueryRowContext(ctx, internal.SelectBlockChainRowIDByHash, hash).Scan(&dbID)
+func (pgb *ChainDB) blockChainDbID(ctx context.Context, ch dbtypes.ChainHash) (dbID uint64, err error) {
+ err = pgb.db.QueryRowContext(ctx, internal.SelectBlockChainRowIDByHash, ch).Scan(&dbID)
err = pgb.replaceCancelError(err)
return
}
@@ -906,13 +922,21 @@ func (pgb *ChainDB) blockChainDbID(ctx context.Context, hash string) (dbID uint6
// BlockChainDbID gets the row ID of the given block hash in the block_chain
// table. The cancellation context is used without timeout.
func (pgb *ChainDB) BlockChainDbID(hash string) (dbID uint64, err error) {
- return pgb.blockChainDbID(pgb.ctx, hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return 0, err
+ }
+ return pgb.blockChainDbID(pgb.ctx, ch)
}
// BlockChainDbIDNoCancel gets the row ID of the given block hash in the
// block_chain table. The cancellation context is used without timeout.
func (pgb *ChainDB) BlockChainDbIDNoCancel(hash string) (dbID uint64, err error) {
- return pgb.blockChainDbID(context.Background(), hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return 0, err
+ }
+ return pgb.blockChainDbID(context.Background(), ch)
}
// RegisterCharts registers chart data fetchers and appenders with the provided
@@ -971,9 +995,13 @@ func (pgb *ChainDB) RegisterCharts(charts *cache.ChartData) {
// appears, along with the index of the transaction in each of the blocks. The
// next and previous block hashes are NOT SET in each BlockStatus.
func (pgb *ChainDB) TransactionBlocks(txHash string) ([]*dbtypes.BlockStatus, []uint32, error) {
+ ch, err := chainHashFromStr(txHash)
+ if err != nil {
+ return nil, nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- hashes, heights, inds, valids, mainchains, err := RetrieveTxnsBlocks(ctx, pgb.db, txHash)
+ hashes, heights, inds, valids, mainchains, err := retrieveTxnsBlocks(ctx, pgb.db, ch)
if err != nil {
return nil, nil, pgb.replaceCancelError(err)
}
@@ -997,7 +1025,7 @@ func (pgb *ChainDB) TransactionBlocks(txHash string) ([]*dbtypes.BlockStatus, []
func (pgb *ChainDB) HeightDB() (int64, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, height, err := DBBestBlock(ctx, pgb.db)
+ _, height, err := dbBestBlock(ctx, pgb.db)
return height, pgb.replaceCancelError(err)
}
@@ -1005,8 +1033,8 @@ func (pgb *ChainDB) HeightDB() (int64, error) {
func (pgb *ChainDB) HashDB() (string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- hash, _, err := DBBestBlock(ctx, pgb.db)
- return hash, pgb.replaceCancelError(err)
+ hash, _, err := dbBestBlock(ctx, pgb.db)
+ return hash.String(), pgb.replaceCancelError(err)
}
// HeightHashDB retrieves the best block height and hash according to the meta
@@ -1014,8 +1042,8 @@ func (pgb *ChainDB) HashDB() (string, error) {
func (pgb *ChainDB) HeightHashDB() (int64, string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- hash, height, err := DBBestBlock(ctx, pgb.db)
- return height, hash, pgb.replaceCancelError(err)
+ hash, height, err := dbBestBlock(ctx, pgb.db)
+ return height, hash.String(), pgb.replaceCancelError(err)
}
// HeightDBLegacy queries the blocks table for the best block height. When the
@@ -1023,7 +1051,7 @@ func (pgb *ChainDB) HeightHashDB() (int64, string, error) {
func (pgb *ChainDB) HeightDBLegacy() (int64, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- bestHeight, _, _, err := RetrieveBestBlockHeight(ctx, pgb.db)
+ bestHeight, _, err := retrieveBestBlockHeight(ctx, pgb.db)
height := int64(bestHeight)
if errors.Is(err, sql.ErrNoRows) {
height = -1
@@ -1035,8 +1063,8 @@ func (pgb *ChainDB) HeightDBLegacy() (int64, error) {
func (pgb *ChainDB) HashDBLegacy() (string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, bestHash, _, err := RetrieveBestBlockHeight(ctx, pgb.db)
- return bestHash, pgb.replaceCancelError(err)
+ _, bestHash, err := retrieveBestBlockHeight(ctx, pgb.db)
+ return bestHash.String(), pgb.replaceCancelError(err)
}
// HeightHashDBLegacy queries the blocks table for the best block's height and
@@ -1044,8 +1072,8 @@ func (pgb *ChainDB) HashDBLegacy() (string, error) {
func (pgb *ChainDB) HeightHashDBLegacy() (uint64, string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- height, hash, _, err := RetrieveBestBlockHeight(ctx, pgb.db)
- return height, hash, pgb.replaceCancelError(err)
+ height, hash, err := retrieveBestBlockHeight(ctx, pgb.db)
+ return height, hash.String(), pgb.replaceCancelError(err)
}
// Height is a getter for ChainDB.bestBlock.height.
@@ -1076,7 +1104,7 @@ func (pgb *ChainDB) GetBestBlockHash() (string, error) {
func (block *BestBlock) HashStr() string {
block.mtx.RLock()
defer block.mtx.RUnlock()
- return block.hash
+ return block.hash.String()
}
// Hash uses the last stored block hash.
@@ -1089,14 +1117,14 @@ func (block *BestBlock) Hash() *chainhash.Hash {
func (pgb *ChainDB) BestBlock() (*chainhash.Hash, int64) {
pgb.bestBlock.mtx.RLock()
defer pgb.bestBlock.mtx.RUnlock()
- hash, _ := chainhash.NewHashFromStr(pgb.bestBlock.hash)
- return hash, pgb.bestBlock.height
+ hash := chainhash.Hash(pgb.bestBlock.hash)
+ return &hash, pgb.bestBlock.height
}
func (pgb *ChainDB) BestBlockStr() (string, int64) {
pgb.bestBlock.mtx.RLock()
defer pgb.bestBlock.mtx.RUnlock()
- return pgb.bestBlock.hash, pgb.bestBlock.height
+ return pgb.bestBlock.hash.String(), pgb.bestBlock.height
}
// BestBlockHash is a getter for ChainDB.bestBlock.hash.
@@ -1109,21 +1137,29 @@ func (pgb *ChainDB) BestBlockHashStr() string {
return pgb.bestBlock.HashStr()
}
-// BlockHeight queries the DB for the height of the specified hash.
-func (pgb *ChainDB) BlockHeight(hash string) (int64, error) {
+func (pgb *ChainDB) blockHeight(hash dbtypes.ChainHash) (int64, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- height, err := RetrieveBlockHeight(ctx, pgb.db, hash)
+ height, err := retrieveBlockHeight(ctx, pgb.db, hash)
return height, pgb.replaceCancelError(err)
}
+// BlockHeight queries the DB for the height of the specified hash.
+func (pgb *ChainDB) BlockHeight(hash string) (int64, error) {
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return 0, err
+ }
+ return pgb.blockHeight(ch)
+}
+
// BlockHash queries the DB for the hash of the mainchain block at the given
// height.
func (pgb *ChainDB) BlockHash(height int64) (string, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- hash, err := RetrieveBlockHash(ctx, pgb.db, height)
- return hash, pgb.replaceCancelError(err)
+ hash, err := retrieveBlockHash(ctx, pgb.db, height)
+ return hash.String(), pgb.replaceCancelError(err)
}
// BlockTimeByHeight queries the DB for the time of the mainchain block at the
@@ -1131,16 +1167,20 @@ func (pgb *ChainDB) BlockHash(height int64) (string, error) {
func (pgb *ChainDB) BlockTimeByHeight(height int64) (int64, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- time, err := RetrieveBlockTimeByHeight(ctx, pgb.db, height)
+ time, err := retrieveBlockTimeByHeight(ctx, pgb.db, height)
return time.UNIX(), pgb.replaceCancelError(err)
}
// VotesInBlock returns the number of votes mined in the block with the
// specified hash.
func (pgb *ChainDB) VotesInBlock(hash string) (int16, error) {
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- voters, err := RetrieveBlockVoteCount(ctx, pgb.db, hash)
+ voters, err := retrieveBlockVoteCount(ctx, pgb.db, ch)
if err != nil {
err = pgb.replaceCancelError(err)
log.Errorf("Unable to get block voter count for hash %s: %v", hash, err)
@@ -1163,48 +1203,79 @@ func (pgb *ChainDB) VotesInBlock(hash string) (int16, error) {
// tx input indexes, and the corresponding funding tx output indexes, and an
// error value are returned.
func (pgb *ChainDB) SpendingTransactions(fundingTxID string) ([]string, []uint32, []uint32, error) {
+ ch, err := chainHashFromStr(fundingTxID)
+ if err != nil {
+ return nil, nil, nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, spendingTxns, vinInds, voutInds, err := RetrieveSpendingTxsByFundingTx(ctx, pgb.db, fundingTxID)
- return spendingTxns, vinInds, voutInds, pgb.replaceCancelError(err)
+ _, spendingTxns, vinInds, voutInds, err := retrieveSpendingTxsByFundingTx(ctx, pgb.db, ch)
+ txStrs := make([]string, len(spendingTxns))
+ for i := range spendingTxns {
+ txStrs[i] = spendingTxns[i].String()
+ }
+ return txStrs, vinInds, voutInds, pgb.replaceCancelError(err)
}
// SpendingTransaction returns the transaction that spends the specified
// transaction outpoint, if it is spent. The spending transaction hash, input
// index, tx tree, and an error value are returned.
-func (pgb *ChainDB) SpendingTransaction(fundingTxID string,
- fundingTxVout uint32) (string, uint32, int8, error) {
+func (pgb *ChainDB) SpendingTransaction(fundingTxID string, fundingTxVout uint32) (string, uint32, error) {
+ ch, err := chainHashFromStr(fundingTxID)
+ if err != nil {
+ return "", 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, spendingTx, vinInd, tree, err := RetrieveSpendingTxByTxOut(ctx, pgb.db, fundingTxID, fundingTxVout)
- return spendingTx, vinInd, tree, pgb.replaceCancelError(err)
+ _, spendingTx, vinInd, err := retrieveSpendingTxByTxOut(ctx, pgb.db, ch, fundingTxVout)
+ return spendingTx.String(), vinInd, pgb.replaceCancelError(err)
}
// BlockTransactions retrieves all transactions in the specified block, their
// indexes in the block, their tree, and an error value.
func (pgb *ChainDB) BlockTransactions(blockHash string) ([]string, []uint32, []int8, error) {
+ ch, err := chainHashFromStr(blockHash)
+ if err != nil {
+ return nil, nil, nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, blockTransactions, blockInds, trees, _, err := RetrieveTxsByBlockHash(ctx, pgb.db, blockHash)
- return blockTransactions, blockInds, trees, pgb.replaceCancelError(err)
+ blockTransactions, blockInds, trees, _, err := retrieveTxsByBlockHash(ctx, pgb.db, ch)
+ txStrs := make([]string, len(blockTransactions))
+ for i := range blockTransactions {
+ txStrs[i] = blockTransactions[i].String()
+ }
+ return txStrs, blockInds, trees, pgb.replaceCancelError(err)
}
// Transaction retrieves all rows from the transactions table for the given
// transaction hash.
func (pgb *ChainDB) Transaction(txHash string) ([]*dbtypes.Tx, error) {
+ ch, err := chainHashFromStr(txHash)
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, dbTxs, err := RetrieveDbTxsByHash(ctx, pgb.db, txHash)
+ _, dbTxs, err := retrieveDbTxsByHash(ctx, pgb.db, ch)
return dbTxs, pgb.replaceCancelError(err)
}
// BlockMissedVotes retrieves the ticket IDs for all missed votes in the
// specified block, and an error value.
func (pgb *ChainDB) BlockMissedVotes(blockHash string) ([]string, error) {
+ ch, err := chainHashFromStr(blockHash)
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- mv, err := RetrieveMissedVotesInBlock(ctx, pgb.db, blockHash)
- return mv, pgb.replaceCancelError(err)
+ mv, err := retrieveMissedVotesInBlock(ctx, pgb.db, ch)
+ txStrs := make([]string, len(mv))
+ for i := range mv {
+ txStrs[i] = mv[i].String()
+ }
+ return txStrs, pgb.replaceCancelError(err)
}
// missedVotesForBlockRange retrieves the number of missed votes for the block
@@ -1220,37 +1291,52 @@ func (pgb *ChainDB) missedVotesForBlockRange(startHeight, endHeight int64) (int6
// vote but failed to do so (miss). There may be multiple since this consideres
// side chain blocks. See TicketMiss for a mainchain-only version. If the ticket
// never missed a vote, the returned error will be dbtypes.ErrNoResult.
+/*
func (pgb *ChainDB) TicketMisses(ticketHash string) ([]string, []int64, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- blockHashes, blockHeights, err := RetrieveMissesForTicket(ctx, pgb.db, ticketHash)
+ blockHashes, blockHeights, err := retrieveMissesForTicket(ctx, pgb.db, ticketHash)
return blockHashes, blockHeights, pgb.replaceCancelError(err)
}
+*/
// TicketMiss retrieves the mainchain block in which the specified ticket was
// called to vote but failed to do so (miss). If the ticket never missed a vote,
// the returned error will be dbtypes.ErrNoResult.
func (pgb *ChainDB) TicketMiss(ticketHash string) (string, int64, error) {
+ ch, err := chainHashFromStr(ticketHash)
+ if err != nil {
+ return "", 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- blockHash, blockHeight, err := RetrieveMissForTicket(ctx, pgb.db, ticketHash)
- return blockHash, blockHeight, pgb.replaceCancelError(err)
+ blockHash, blockHeight, err := retrieveMissForTicket(ctx, pgb.db, ch)
+ return blockHash.String(), blockHeight, pgb.replaceCancelError(err)
}
// PoolStatusForTicket retrieves the specified ticket's spend status and ticket
// pool status, and an error value.
func (pgb *ChainDB) PoolStatusForTicket(txid string) (dbtypes.TicketSpendType, dbtypes.TicketPoolStatus, error) {
+ ch, err := chainHashFromStr(txid)
+ if err != nil {
+ return 0, 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, spendType, poolStatus, err := RetrieveTicketStatusByHash(ctx, pgb.db, txid)
+ _, spendType, poolStatus, err := retrieveTicketStatusByHash(ctx, pgb.db, ch)
return spendType, poolStatus, pgb.replaceCancelError(err)
}
// VoutValue retrieves the value of the specified transaction outpoint in atoms.
+// TEST ONLY REMOVE
func (pgb *ChainDB) VoutValue(txID string, vout uint32) (uint64, error) {
+ ch, err := chainHashFromStr(txID)
+ if err != nil {
+ return 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- voutValue, err := RetrieveVoutValue(ctx, pgb.db, txID, vout)
+ voutValue, err := retrieveVoutValue(ctx, pgb.db, ch, vout)
if err != nil {
return 0, pgb.replaceCancelError(err)
}
@@ -1260,10 +1346,15 @@ func (pgb *ChainDB) VoutValue(txID string, vout uint32) (uint64, error) {
// VoutValues retrieves the values of each outpoint of the specified
// transaction. The corresponding indexes in the block and tx trees of the
// outpoints, and an error value are also returned.
+// TEST ONLY REMOVE
func (pgb *ChainDB) VoutValues(txID string) ([]uint64, []uint32, []int8, error) {
+ ch, err := chainHashFromStr(txID)
+ if err != nil {
+ return nil, nil, nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- voutValues, txInds, txTrees, err := RetrieveVoutValues(ctx, pgb.db, txID)
+ voutValues, txInds, txTrees, err := retrieveVoutValues(ctx, pgb.db, ch)
if err != nil {
return nil, nil, nil, pgb.replaceCancelError(err)
}
@@ -1274,10 +1365,14 @@ func (pgb *ChainDB) VoutValues(txID string) ([]uint64, []uint32, []int8, error)
// transaction. The index of the transaction within the block, the transaction
// index, and an error value are also returned.
func (pgb *ChainDB) TransactionBlock(txID string) (string, uint32, int8, error) {
+ ch, err := chainHashFromStr(txID)
+ if err != nil {
+ return "", 0, 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, blockHash, blockInd, tree, err := RetrieveTxByHash(ctx, pgb.db, txID)
- return blockHash, blockInd, tree, pgb.replaceCancelError(err)
+ _, blockHash, blockInd, tree, err := retrieveTxByHash(ctx, pgb.db, ch)
+ return blockHash.String(), blockInd, tree, pgb.replaceCancelError(err)
}
// AgendaVotes fetches the data used to plot a graph of votes cast per day per
@@ -1348,6 +1443,7 @@ func (pgb *ChainDB) AllAgendas() (map[string]dbtypes.MileStone, error) {
// NumAddressIntervals gets the number of unique time intervals for the
// specified grouping where there are entries in the addresses table for the
// given address.
+/*
func (pgb *ChainDB) NumAddressIntervals(addr string, grouping dbtypes.TimeBasedGrouping) (int64, error) {
if grouping >= dbtypes.NumIntervals {
return 0, fmt.Errorf("invalid time grouping %d", grouping)
@@ -1356,11 +1452,13 @@ func (pgb *ChainDB) NumAddressIntervals(addr string, grouping dbtypes.TimeBasedG
defer cancel()
return retrieveAddressTxsCount(ctx, pgb.db, addr, grouping.String())
}
+*/
// AddressMetrics returns the block time of the oldest transaction and the
// total count for all the transactions linked to the provided address grouped
// by years, months, weeks and days time grouping in seconds.
// This helps plot more meaningful address history graphs to the user.
+/*
func (pgb *ChainDB) AddressMetrics(addr string) (*dbtypes.AddressMetrics, error) {
_, err := stdaddr.DecodeAddress(addr, pgb.chainParams)
if err != nil {
@@ -1400,11 +1498,13 @@ func (pgb *ChainDB) AddressMetrics(addr string) (*dbtypes.AddressMetrics, error)
return &metrics, pgb.replaceCancelError(err)
}
+*/
// AddressTransactions retrieves a slice of *dbtypes.AddressRow for a given
// address and transaction type (i.e. all, credit, or debit) from the DB. Only
// the first N transactions starting from the offset element in the set of all
// txnType transactions.
+/*
func (pgb *ChainDB) AddressTransactions(address string, N, offset int64,
txnType dbtypes.AddrTxnViewType) (addressRows []*dbtypes.AddressRow, err error) {
_, err = stdaddr.DecodeAddress(address, pgb.chainParams)
@@ -1415,17 +1515,17 @@ func (pgb *ChainDB) AddressTransactions(address string, N, offset int64,
var addrFunc func(context.Context, *sql.DB, string, int64, int64) ([]*dbtypes.AddressRow, error)
switch txnType {
case dbtypes.AddrTxnCredit:
- addrFunc = RetrieveAddressCreditTxns
+ addrFunc = retrieveAddressCreditTxns
case dbtypes.AddrTxnAll:
- addrFunc = RetrieveAddressTxns
+ addrFunc = retrieveAddressTxns
case dbtypes.AddrTxnDebit:
- addrFunc = RetrieveAddressDebitTxns
+ addrFunc = retrieveAddressDebitTxns
case dbtypes.AddrMergedTxnDebit:
- addrFunc = RetrieveAddressMergedDebitTxns
+ addrFunc = retrieveAddressMergedDebitTxns
case dbtypes.AddrMergedTxnCredit:
- addrFunc = RetrieveAddressMergedCreditTxns
+ addrFunc = retrieveAddressMergedCreditTxns
case dbtypes.AddrMergedTxn:
- addrFunc = RetrieveAddressMergedTxns
+ addrFunc = retrieveAddressMergedTxns
default:
return nil, fmt.Errorf("unknown AddrTxnViewType %v", txnType)
}
@@ -1437,18 +1537,19 @@ func (pgb *ChainDB) AddressTransactions(address string, N, offset int64,
err = pgb.replaceCancelError(err)
return
}
+*/
// AddressTransactionsAll retrieves all non-merged main chain addresses table
// rows for the given address. There is presently a hard limit of 3 million rows
// that may be returned, which is more than 4x the count for the treasury
-// adddress as of mainnet block 521900.
+// address as of mainnet block 521900.
func (pgb *ChainDB) AddressTransactionsAll(address string) (addressRows []*dbtypes.AddressRow, err error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
const limit = 3000000
- addressRows, err = RetrieveAddressTxns(ctx, pgb.db, address, limit, 0)
- // addressRows, err = RetrieveAllMainchainAddressTxns(ctx, pgb.db, address)
+ addressRows, err = retrieveAddressTxns(ctx, pgb.db, address, limit, 0)
+ // addressRows, err = retrieveAllMainchainAddressTxns(ctx, pgb.db, address)
err = pgb.replaceCancelError(err)
return
}
@@ -1456,15 +1557,15 @@ func (pgb *ChainDB) AddressTransactionsAll(address string) (addressRows []*dbtyp
// AddressTransactionsAllMerged retrieves all merged (stakeholder-approved and
// mainchain only) addresses table rows for the given address. There is
// presently a hard limit of 3 million rows that may be returned, which is more
-// than 4x the count for the treasury adddress as of mainnet block 521900.
+// than 4x the count for the treasury address as of mainnet block 521900.
func (pgb *ChainDB) AddressTransactionsAllMerged(address string) (addressRows []*dbtypes.AddressRow, err error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
const limit = 3000000
- addressRows, err = RetrieveAddressMergedTxns(ctx, pgb.db, address, limit, 0)
+ addressRows, err = retrieveAddressMergedTxns(ctx, pgb.db, address, limit, 0)
// const onlyValidMainchain = true
- // _, addressRows, err = RetrieveAllAddressMergedTxns(ctx, pgb.db, address,
+ // _, addressRows, err = retrieveAllAddressMergedTxns(ctx, pgb.db, address,
// onlyValidMainchain)
err = pgb.replaceCancelError(err)
return
@@ -1639,9 +1740,13 @@ func (pgb *ChainDB) ticketPoolVisualization(interval dbtypes.TimeBasedGrouping)
// GetTicketInfo retrieves information about the pool and spend statuses, the
// purchase block, the lottery block, and the spending transaction.
func (pgb *ChainDB) GetTicketInfo(txid string) (*apitypes.TicketInfo, error) {
+ ch, err := chainHashFromStr(txid)
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- spendStatus, poolStatus, purchaseBlock, lotteryBlock, spendTxid, err := RetrieveTicketInfoByHash(ctx, pgb.db, txid)
+ spendStatus, poolStatus, purchaseBlock, lotteryBlock, spendTxid, err := retrieveTicketInfoByHash(ctx, pgb.db, ch)
if err != nil {
return nil, pgb.replaceCancelError(err)
}
@@ -1655,18 +1760,20 @@ func (pgb *ChainDB) GetTicketInfo(txid string) (*apitypes.TicketInfo, error) {
}
if spendStatus == dbtypes.TicketRevoked {
status = spendStatus.String()
- revocation = &spendTxid
+ spendStr := spendTxid.String()
+ revocation = &spendStr
} else if spendStatus == dbtypes.TicketVoted {
- vote = &spendTxid
+ spendStr := spendTxid.String()
+ vote = &spendStr
}
if poolStatus == dbtypes.PoolStatusMissed {
- hash, height, err := RetrieveMissForTicket(ctx, pgb.db, txid)
+ hash, height, err := retrieveMissForTicket(ctx, pgb.db, ch)
if err != nil {
return nil, pgb.replaceCancelError(err)
}
lotteryBlock = &apitypes.TinyBlock{
- Hash: hash,
+ Hash: hash.String(),
Height: uint32(height),
}
}
@@ -1691,7 +1798,21 @@ func (pgb *ChainDB) TSpendVotes(tspendID *chainhash.Hash) (*dbtypes.TreasurySpen
return nil, fmt.Errorf("expected 1 tally, got %d", len(tspendVotesResult.Votes))
}
- tsv := dbtypes.TreasurySpendVotes(tspendVotesResult.Votes[0])
+ v0 := tspendVotesResult.Votes[0]
+
+ hash, err := chainHashFromStr(v0.Hash)
+ if err != nil {
+ return nil, err
+ }
+
+ tsv := dbtypes.TreasurySpendVotes{
+ Hash: hash,
+ Expiry: v0.Expiry,
+ VoteStart: v0.VoteStart,
+ VoteEnd: v0.VoteEnd,
+ YesVotes: v0.YesVotes,
+ NoVotes: v0.NoVotes,
+ }
return &tsv, nil
}
@@ -1901,7 +2022,7 @@ func (pgb *ChainDB) AddressBalance(address string) (bal *dbtypes.AddressBalance,
// Cache is empty or stale, so query the DB.
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- bal, err = RetrieveAddressBalance(ctx, pgb.db, address)
+ bal, err = retrieveAddressBalance(ctx, pgb.db, address)
if err != nil {
err = pgb.replaceCancelError(err)
return
@@ -2040,11 +2161,11 @@ func (pgb *ChainDB) retrieveMergedTxnCount(addr string, txnView dbtypes.AddrTxnV
var err error
switch txnView {
case dbtypes.AddrMergedTxnDebit:
- count, err = CountMergedSpendingTxns(ctx, pgb.db, addr)
+ count, err = countMergedSpendingTxns(ctx, pgb.db, addr)
case dbtypes.AddrMergedTxnCredit:
- count, err = CountMergedFundingTxns(ctx, pgb.db, addr)
+ count, err = countMergedFundingTxns(ctx, pgb.db, addr)
case dbtypes.AddrMergedTxn:
- count, err = CountMergedTxns(ctx, pgb.db, addr)
+ count, err = countMergedTxns(ctx, pgb.db, addr)
default:
return 0, fmt.Errorf("retrieveMergedTxnCount: requested count for non-merged view")
}
@@ -2342,7 +2463,7 @@ FUNDING_TX_DUPLICATE_CHECK:
// database as soon as they are mined and thus we need to be careful
// to not include those transactions in our list.
for _, b := range addrData.Transactions {
- if f.Hash.String() == b.TxID && f.Index == b.InOutID {
+ if dbtypes.ChainHash(f.Hash) == b.TxID && f.Index == b.InOutID {
continue FUNDING_TX_DUPLICATE_CHECK
}
}
@@ -2357,7 +2478,7 @@ FUNDING_TX_DUPLICATE_CHECK:
}
if txnType == dbtypes.AddrTxnAll || txnType == dbtypes.AddrTxnCredit || txnType == dbtypes.AddrUnspentTxn {
addrTx := &dbtypes.AddressTx{
- TxID: fundingTx.Hash().String(),
+ TxID: dbtypes.ChainHash(fundingTx.Hash()),
TxType: txhelpers.DetermineTxTypeString(fundingTx.Tx),
InOutID: f.Index,
Time: dbtypes.NewTimeDefFromUNIX(fundingTx.MemPoolTime),
@@ -2386,7 +2507,7 @@ SPENDING_TX_DUPLICATE_CHECK:
// database as soon as they are mined and thus we need to be careful
// to not include those transactions in our list.
for _, b := range addrData.Transactions {
- if f.TxSpending.String() == b.TxID && f.InputIndex == int(b.InOutID) {
+ if dbtypes.ChainHash(f.TxSpending) == b.TxID && f.InputIndex == int(b.InOutID) {
continue SPENDING_TX_DUPLICATE_CHECK
}
}
@@ -2403,29 +2524,30 @@ SPENDING_TX_DUPLICATE_CHECK:
// The total send amount must be looked up from the previous
// outpoint because vin:i valuein is not reliable from dcrd.
prevhash := spendingTx.Tx.TxIn[f.InputIndex].PreviousOutPoint.Hash
- strprevhash := prevhash.String()
previndex := spendingTx.Tx.TxIn[f.InputIndex].PreviousOutPoint.Index
valuein := addressUTXOs.TxnsStore[prevhash].Tx.TxOut[previndex].Value
// Look through old transactions and set the spending transactions'
// matching transaction fields.
for _, dbTxn := range addrData.Transactions {
- if dbTxn.TxID == strprevhash && dbTxn.InOutID == previndex && dbTxn.IsFunding {
- dbTxn.MatchedTx = spendingTx.Hash().String()
+ if dbTxn.TxID == dbtypes.ChainHash(prevhash) && dbTxn.InOutID == previndex && dbTxn.IsFunding {
+ spendHash := dbtypes.ChainHash(spendingTx.Hash())
+ dbTxn.MatchedTx = &spendHash
dbTxn.MatchedTxIndex = uint32(f.InputIndex)
}
}
if txnType == dbtypes.AddrTxnAll || txnType == dbtypes.AddrTxnDebit {
+ prevCH := dbtypes.ChainHash(prevhash)
addrTx := &dbtypes.AddressTx{
- TxID: spendingTx.Hash().String(),
+ TxID: dbtypes.ChainHash(spendingTx.Hash()),
TxType: txhelpers.DetermineTxTypeString(spendingTx.Tx),
InOutID: uint32(f.InputIndex),
Time: dbtypes.NewTimeDefFromUNIX(spendingTx.MemPoolTime),
FormattedSize: humanize.Bytes(uint64(spendingTx.Tx.SerializeSize())),
Total: txhelpers.TotalOutFromMsgTx(spendingTx.Tx).ToCoin(),
SentTotal: dcrutil.Amount(valuein).ToCoin(),
- MatchedTx: strprevhash,
+ MatchedTx: &prevCH,
MatchedTxIndex: previndex,
}
addrData.Transactions = append(addrData.Transactions, addrTx)
@@ -2451,19 +2573,23 @@ SPENDING_TX_DUPLICATE_CHECK:
// given transaction hash. Transactions in valid and mainchain blocks are chosen
// first.
func (pgb *ChainDB) DbTxByHash(txid string) (*dbtypes.Tx, error) {
+ ch, err := chainHashFromStr(txid)
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- _, dbTx, err := RetrieveDbTxByHash(ctx, pgb.db, txid)
+ _, dbTx, err := retrieveDbTxByHash(ctx, pgb.db, ch)
return dbTx, pgb.replaceCancelError(err)
}
-// FundingOutpointIndxByVinID retrieves the the transaction output index of the
+// fundingOutpointIndxByVinID retrieves the transaction output index of the
// previous outpoint for a transaction input specified by row ID in the vins
// table, which stores previous outpoints for each vin.
-func (pgb *ChainDB) FundingOutpointIndxByVinID(id uint64) (uint32, error) {
+func (pgb *ChainDB) fundingOutpointIndxByVinID(id uint64) (uint32, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- ind, err := RetrieveFundingOutpointIndxByVinID(ctx, pgb.db, id)
+ ind, err := retrieveFundingOutpointIndxByVinID(ctx, pgb.db, id)
return ind, pgb.replaceCancelError(err)
}
@@ -2479,9 +2605,10 @@ func (pgb *ChainDB) FillAddressTransactions(addrInfo *dbtypes.AddressInfo) error
var numUnconfirmed int64
for i, txn := range addrInfo.Transactions {
+ hashStr := txn.TxID.String()
// Retrieve the most valid, most mainchain, and most recent tx with this
// hash. This means it prefers mainchain and valid blocks first.
- dbTx, err := pgb.DbTxByHash(txn.TxID)
+ dbTx, err := pgb.DbTxByHash(hashStr) // only need: Size, Sent, BlockTime, BlockHeight, VinDbIds
if err != nil {
return err
}
@@ -2500,24 +2627,24 @@ func (pgb *ChainDB) FillAddressTransactions(addrInfo *dbtypes.AddressInfo) error
// matching tx hash already present. During the next database
// restructuring we may want to consider including matching tx index
// along with matching tx hash in the addresses table.
- if txn.MatchedTx != `` {
+ if txn.MatchedTx != nil {
if !txn.IsFunding {
// Spending transaction: lookup the previous outpoint's txout
- // index by the vins table row ID.
- idx, err := pgb.FundingOutpointIndxByVinID(dbTx.VinDbIds[txn.InOutID])
+ // index by the vins table row ID. NOTE: could also do by txn.TxID and txn.InOutID
+ idx, err := pgb.fundingOutpointIndxByVinID(dbTx.VinDbIds[txn.InOutID])
if err != nil {
log.Warnf("Matched Transaction Lookup failed for %s:%d: id: %d: %v",
- txn.TxID, txn.InOutID, txn.InOutID, err)
+ hashStr, txn.InOutID, txn.InOutID, err)
} else {
addrInfo.Transactions[i].MatchedTxIndex = idx
}
} else {
// Funding transaction: lookup by the matching (spending) tx
// hash and tx index.
- _, idx, _, err := pgb.SpendingTransaction(txn.TxID, txn.InOutID)
+ _, idx, err := pgb.SpendingTransaction(hashStr, txn.InOutID)
if err != nil {
log.Warnf("Matched Transaction Lookup failed for %s:%d: %v",
- txn.TxID, txn.InOutID, err)
+ hashStr, txn.InOutID, err)
} else {
addrInfo.Transactions[i].MatchedTxIndex = idx
}
@@ -2630,7 +2757,7 @@ func (pgb *ChainDB) AddressTransactionDetails(addr string, count, skip int64,
txsShort := make([]*apitypes.AddressTxShort, 0, len(txs))
for i := range txs {
txsShort = append(txsShort, &apitypes.AddressTxShort{
- TxID: txs[i].TxID,
+ TxID: txs[i].TxID.String(),
Time: apitypes.TimeAPI{S: txs[i].Time},
Value: txs[i].Total,
Confirmations: int64(txs[i].Confirmations),
@@ -2660,11 +2787,13 @@ func (pgb *ChainDB) UpdateChainState(blockChainInfo *chainjson.GetBlockChainInfo
ruleChangeInterval := int64(pgb.chainParams.RuleChangeActivationInterval)
+ blockHash, _ := chainHashFromStr(blockChainInfo.BestBlockHash)
+
chainInfo := dbtypes.BlockChainData{
Chain: blockChainInfo.Chain,
SyncHeight: blockChainInfo.SyncHeight,
BestHeight: blockChainInfo.Blocks,
- BestBlockHash: blockChainInfo.BestBlockHash,
+ BestBlockHash: blockHash,
Difficulty: blockChainInfo.Difficulty,
VerificationProgress: blockChainInfo.VerificationProgress,
ChainWork: blockChainInfo.ChainWork,
@@ -2761,7 +2890,7 @@ func (pgb *ChainDB) Store(blockData *blockdata.BlockData, msgBlock *wire.MsgBloc
// PurgeBestBlocks deletes all data for the N best blocks in the DB.
func (pgb *ChainDB) PurgeBestBlocks(N int64) (*dbtypes.DeletionSummary, int64, error) {
- res, height, _, err := DeleteBlocks(pgb.ctx, N, pgb.db)
+ res, height, _, err := deleteBlocks(pgb.ctx, N, pgb.db)
if err != nil {
return nil, height, pgb.replaceCancelError(err)
}
@@ -2989,21 +3118,6 @@ func (pgb *ChainDB) coinSupply(charts *cache.ChartData) (*sql.Rows, func(), erro
return rows, cancel, nil
}
-// txPerDay fetches the tx-per-day chart data from retrieveTxPerDay.
-func (pgb *ChainDB) txPerDay(timeArr []dbtypes.TimeDef, txCountArr []uint64) (
- []dbtypes.TimeDef, []uint64, error) {
- ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
- defer cancel()
-
- var err error
- timeArr, txCountArr, err = retrieveTxPerDay(ctx, pgb.db, timeArr, txCountArr)
- if err != nil {
- err = fmt.Errorf("txPerDay: %w", pgb.replaceCancelError(err))
- }
-
- return timeArr, txCountArr, err
-}
-
// blockFees sets or updates a series of per-block fees.
// This is the Fetcher half of a pair that make up a cache.ChartUpdater. The
// Appender half is appendBlockFees.
@@ -3145,7 +3259,7 @@ func (pgb *ChainDB) PowerlessTickets() (*apitypes.PowerlessTickets, error) {
// table for each row of vins in the vin_db_ids array. The returns are the
// number of vins updated, the vin row IDs array, the vouts row IDs array, and
// an error value.
-func (pgb *ChainDB) SetVinsMainchainByBlock(blockHash string) (int64, []dbtypes.UInt64Array, []dbtypes.UInt64Array, error) {
+func (pgb *ChainDB) setVinsMainchainByBlock(blockHash dbtypes.ChainHash) (int64, []dbtypes.UInt64Array, []dbtypes.UInt64Array, error) {
// The queries in this function should not timeout or (probably) canceled,
// so use a background context.
ctx := context.Background()
@@ -3153,7 +3267,7 @@ func (pgb *ChainDB) SetVinsMainchainByBlock(blockHash string) (int64, []dbtypes.
// Get vins DB IDs from the transactions table, for each tx in the block.
onlyRegularTxns := false
vinDbIDsBlk, voutDbIDsBlk, areMainchain, err :=
- RetrieveTxnsVinsVoutsByBlock(ctx, pgb.db, blockHash, onlyRegularTxns)
+ retrieveTxnsVinsVoutsByBlock(ctx, pgb.db, blockHash, onlyRegularTxns)
if err != nil {
return 0, nil, nil, fmt.Errorf("unable to retrieve vin data for block %s: %w", blockHash, err)
}
@@ -3200,50 +3314,17 @@ func (pgb *ChainDB) setVinsMainchainOneTxn(vinDbIDs dbtypes.UInt64Array,
return rowsUpdated, nil
}
-// PkScriptByVinID retrieves the pkScript and script version for the row of the
-// vouts table corresponding to the previous output of the vin specified by row
-// ID of the vins table.
-func (pgb *ChainDB) PkScriptByVinID(id uint64) (pkScript []byte, ver uint16, err error) {
- ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
- defer cancel()
- pks, ver, err := RetrievePkScriptByVinID(ctx, pgb.db, id)
- return pks, ver, pgb.replaceCancelError(err)
-}
-
-// PkScriptByVoutID retrieves the pkScript and script version for the row of the
-// vouts table specified by the row ID id.
-func (pgb *ChainDB) PkScriptByVoutID(id uint64) (pkScript []byte, ver uint16, err error) {
- ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
- defer cancel()
- pks, ver, err := RetrievePkScriptByVoutID(ctx, pgb.db, id)
- return pks, ver, pgb.replaceCancelError(err)
-}
-
// VinsForTx returns a slice of dbtypes.VinTxProperty values for each vin
-// referenced by the transaction dbTx, along with the pkScript and script
-// version for the corresponding previous outpoints.
-func (pgb *ChainDB) VinsForTx(dbTx *dbtypes.Tx) ([]dbtypes.VinTxProperty, []string, []uint16, error) {
- // Retrieve the pkScript and script version for the previous outpoint of
- // each vin.
- prevPkScripts := make([]string, 0, len(dbTx.VinDbIds))
- versions := make([]uint16, 0, len(dbTx.VinDbIds))
- for _, id := range dbTx.VinDbIds {
- pkScript, ver, err := pgb.PkScriptByVinID(id)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("PkScriptByVinID: %w", err)
- }
- prevPkScripts = append(prevPkScripts, hex.EncodeToString(pkScript))
- versions = append(versions, ver)
- }
-
+// referenced by the transaction dbTx.
+func (pgb *ChainDB) VinsForTx(dbTx *dbtypes.Tx) ([]dbtypes.VinTxProperty, error) {
// Retrieve the vins row data.
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- vins, err := RetrieveVinsByIDs(ctx, pgb.db, dbTx.VinDbIds)
+ vins, err := retrieveVinsByIDs(ctx, pgb.db, dbTx.VinDbIds)
if err != nil {
- err = fmt.Errorf("RetrieveVinsByIDs: %w", err)
+ err = fmt.Errorf("retrieveVinsByIDs: %w", err)
}
- return vins, prevPkScripts, versions, pgb.replaceCancelError(err)
+ return vins, pgb.replaceCancelError(err)
}
// VoutsForTx returns a slice of dbtypes.Vout values for each vout referenced by
@@ -3251,30 +3332,32 @@ func (pgb *ChainDB) VinsForTx(dbTx *dbtypes.Tx) ([]dbtypes.VinTxProperty, []stri
func (pgb *ChainDB) VoutsForTx(dbTx *dbtypes.Tx) ([]dbtypes.Vout, error) {
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- vouts, err := RetrieveVoutsByIDs(ctx, pgb.db, dbTx.VoutDbIds)
+ vouts, err := retrieveVoutsByIDs(ctx, pgb.db, dbTx.VoutDbIds)
return vouts, pgb.replaceCancelError(err)
}
-func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved int64) {
- tipHash = pgb.BestBlockHashStr()
+func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHashStr string, blocksMoved int64) {
+ tipHash := dbtypes.ChainHash(*pgb.BestBlockHash())
+ tipHashStr = tipHash.String()
addresses := make(map[string]struct{})
var txnsUpdated, vinsUpdated, votesUpdated, ticketsUpdated, treasuryTxnsUpdates, addrsUpdated int64
- for tipHash != mainRoot {
+ for tipHashStr != mainRoot {
+ log.Infof("TipToSideChain: tipHashStr = %v, mainRoot = %v", tipHashStr, mainRoot)
// 1. Block. Set is_mainchain=false on the tip block, return hash of
// previous block.
now := time.Now()
- previousHash, err := SetMainchainByBlockHash(pgb.db, tipHash, false)
+ previousHash, err := setMainchainByBlockHash(pgb.db, tipHash, false)
if err != nil {
log.Errorf("Failed to set block %s as a sidechain block: %v",
tipHash, err)
}
blocksMoved++
- log.Debugf("SetMainchainByBlockHash: %v", time.Since(now))
+ log.Debugf("SetMainchainByBlockHash: %v / tip = %v , prev = %v", time.Since(now), tipHash, previousHash)
// 2. Transactions. Set is_mainchain=false on all transactions in the
// tip block, returning only the number of transactions updated.
now = time.Now()
- rowsUpdated, _, err := UpdateTransactionsMainchain(pgb.db, tipHash, false)
+ rowsUpdated, _, err := updateTransactionsMainchain(pgb.db, tipHash, false)
if err != nil {
log.Errorf("Failed to set transactions in block %s as sidechain: %v",
tipHash, err)
@@ -3294,7 +3377,7 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// 4. Vins. Set is_mainchain=false on all vins, returning the number of
// vins updated, the vins table row IDs, and the vouts table row IDs.
now = time.Now()
- rowsUpdated, vinDbIDsBlk, voutDbIDsBlk, err := pgb.SetVinsMainchainByBlock(tipHash) // isMainchain from transactions table
+ rowsUpdated, vinDbIDsBlk, voutDbIDsBlk, err := pgb.setVinsMainchainByBlock(tipHash) // isMainchain from transactions table
if err != nil {
log.Errorf("Failed to set vins in block %s as sidechain: %v",
tipHash, err)
@@ -3305,9 +3388,9 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// 5. Addresses. Set valid_mainchain=false on all addresses rows
// corresponding to the spending transactions specified by the vins DB
// row IDs, and the funding transactions specified by the vouts DB row
- // IDs. The IDs come for free via RetrieveTxnsVinsVoutsByBlock.
+ // IDs. The IDs come for free via retrieveTxnsVinsVoutsByBlock.
now = time.Now()
- addrs, numAddrSpending, numAddrFunding, err := UpdateAddressesMainchainByIDs(pgb.db,
+ addrs, numAddrSpending, numAddrFunding, err := updateAddressesMainchainByIDs(pgb.db,
vinDbIDsBlk, voutDbIDsBlk, false)
if err != nil {
log.Errorf("Failed to set addresses rows in block %s as sidechain: %v",
@@ -3321,7 +3404,7 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// 6. Votes. Sets is_mainchain=false on all votes in the tip block.
now = time.Now()
- rowsUpdated, err = UpdateVotesMainchain(pgb.db, tipHash, false)
+ rowsUpdated, err = updateVotesMainchain(pgb.db, tipHash, false)
if err != nil {
log.Errorf("Failed to set votes in block %s as sidechain: %v",
tipHash, err)
@@ -3331,7 +3414,7 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// 7. Tickets. Sets is_mainchain=false on all tickets in the tip block.
now = time.Now()
- rowsUpdated, err = UpdateTicketsMainchain(pgb.db, tipHash, false)
+ rowsUpdated, err = updateTicketsMainchain(pgb.db, tipHash, false)
if err != nil {
log.Errorf("Failed to set tickets in block %s as sidechain: %v",
tipHash, err)
@@ -3341,7 +3424,7 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// 8. Treasury. Sets is_mainchain=false on all entries in the tip block.
now = time.Now()
- rowsUpdated, err = UpdateTreasuryMainchain(pgb.db, tipHash, false)
+ rowsUpdated, err = updateTreasuryMainchain(pgb.db, tipHash, false)
if err != nil {
log.Errorf("Failed to set tickets in block %s as sidechain: %v",
tipHash, err)
@@ -3351,9 +3434,10 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (tipHash string, blocksMoved
// move on to next block
tipHash = previousHash
+ tipHashStr = tipHash.String()
pgb.bestBlock.mtx.Lock()
- pgb.bestBlock.height, err = pgb.BlockHeight(tipHash)
+ pgb.bestBlock.height, err = pgb.blockHeight(tipHash)
if err != nil {
log.Errorf("Failed to retrieve block height for %s", tipHash)
}
@@ -3387,7 +3471,7 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// winningTickets is only set during initial chain sync.
// Retrieve it from the stakeDB.
var tpi *apitypes.TicketPoolInfo
- var winningTickets []string
+ var winningTickets []dbtypes.ChainHash
if isMainchain {
var found bool
tpi, found = pgb.stakeDB.PoolInfo(blockHash)
@@ -3399,7 +3483,13 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
err = fmt.Errorf("TicketPoolInfo height mismatch. expected %d. found %d", msgBlock.Header.Height, tpi.Height)
return
}
- winningTickets = tpi.Winners
+ winningTickets = make([]dbtypes.ChainHash, len(tpi.Winners))
+ for i := range winningTickets {
+ winningTickets[i], err = chainHashFromStr(tpi.Winners[i])
+ if err != nil {
+ return
+ }
+ }
}
// Convert the wire.MsgBlock to a dbtypes.Block.
@@ -3408,7 +3498,7 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// Get the previous winners (stake DB pool info cache has this info). If the
// previous block is side chain, stakedb will not have the
// winners/validators. Since Validators are only used to identify misses in
- // InsertVotes, we will leave the Validators empty and assume there are no
+ // insertVotes, we will leave the Validators empty and assume there are no
// misses. If this block becomes main chain at some point via a
// reorganization, its table entries will be updated appropriately, which
// will include inserting any misses since the stakeDB will then include the
@@ -3417,14 +3507,20 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// imported side chain blocks over to main chain.
prevBlockHash := msgBlock.Header.PrevBlock
- var winners []string
+ var winners []dbtypes.ChainHash
if isMainchain && !bytes.Equal(zeroHash[:], prevBlockHash[:]) {
lastTpi, found := pgb.stakeDB.PoolInfo(prevBlockHash)
if !found {
err = fmt.Errorf("stakedb.PoolInfo failed for block %s", blockHash)
return
}
- winners = lastTpi.Winners
+ winners = make([]dbtypes.ChainHash, len(lastTpi.Winners))
+ for i := range winners {
+ winners[i], err = chainHashFromStr(lastTpi.Winners[i])
+ if err != nil {
+ return
+ }
+ }
}
// Wrap the message block with newly winning tickets and the tickets
@@ -3511,9 +3607,9 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// Store the block now that it has all if its transaction row IDs.
var blockDbID uint64
- blockDbID, err = InsertBlock(pgb.db, dbBlock, isValid, isMainchain, pgb.dupChecks)
+ blockDbID, err = insertBlock(pgb.db, dbBlock, isValid, isMainchain, pgb.dupChecks)
if err != nil {
- log.Error("InsertBlock:", err)
+ log.Error("insertBlock:", err)
return
}
pgb.lastBlock[blockHash] = blockDbID
@@ -3521,8 +3617,8 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// Insert the block in the block_chain table with the previous block hash
// and an empty string for the next block hash, which may be updated when a
// new block extends this chain.
- err = InsertBlockPrevNext(pgb.db, blockDbID, dbBlock.Hash,
- dbBlock.PreviousHash, "")
+ err = insertBlockPrevNext(pgb.db, blockDbID, &dbBlock.Hash,
+ &dbBlock.PreviousHash, nil)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
log.Error("InsertBlockPrevNext:", err)
return
@@ -3533,7 +3629,7 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// invalidated/disapproved the previous block, also update the is_valid
// columns for the previous block's entries in the following tables: blocks,
// vins, addresses, and transactions.
- err = pgb.UpdateLastBlock(msgBlock, isMainchain)
+ err = pgb.updateLastBlock(msgBlock, isMainchain)
if err != nil && !errors.Is(err, sql.ErrNoRows) && !errors.Is(err, dbtypes.ErrNoResult) {
err = fmt.Errorf("UpdateLastBlock: %w", err)
return
@@ -3548,7 +3644,7 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
// Insert the block stats.
if tpi != nil {
- err = InsertBlockStats(pgb.db, blockDbID, tpi)
+ err = insertBlockStats(pgb.db, blockDbID, tpi)
if err != nil {
err = fmt.Errorf("InsertBlockStats: %w", err)
return
@@ -3556,7 +3652,7 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
}
// Update the best block in the meta table.
- err = SetDBBestBlock(pgb.db, dbBlock.Hash, int64(dbBlock.Height))
+ err = setDBBestBlock(pgb.db, dbBlock.Hash, int64(dbBlock.Height))
if err != nil {
err = fmt.Errorf("SetDBBestBlock: %w", err)
return
@@ -3574,27 +3670,22 @@ func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, isValid, isMainchain,
return
}
-// SetDBBestBlock stores ChainDB's BestBlock data in the meta table. UNUSED
-func (pgb *ChainDB) SetDBBestBlock() error {
- pgb.bestBlock.mtx.RLock()
- bbHash, bbHeight := pgb.bestBlock.hash, pgb.bestBlock.height
- pgb.bestBlock.mtx.RUnlock()
- return SetDBBestBlock(pgb.db, bbHash, bbHeight)
-}
-
-// UpdateLastBlock set the previous block's next block hash in the block_chain
+// updateLastBlock sets the previous block's next block hash in the block_chain
// table with this block's hash as it is next. If the current block's votes
// invalidated/disapproved the previous block, it also updates the is_valid
// columns for the previous block's entries in the following tables: blocks,
// vins, addresses, and transactions. If the previous block is not on the same
// chain as this block (as indicated by isMainchain), no updates are performed.
-func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) error {
+func (pgb *ChainDB) updateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) error {
// Only update if last was not genesis, which is not in the table (implied).
- lastBlockHash := msgBlock.Header.PrevBlock
- if lastBlockHash == zeroHash {
+ lastBlockHashC := msgBlock.Header.PrevBlock
+ lastBlockHash := dbtypes.ChainHash(lastBlockHashC)
+ if lastBlockHash.IsZero() {
return nil
}
+ blockHash := dbtypes.ChainHash(msgBlock.Header.BlockHash())
+
// Ensure previous block has the same main/sidechain status. If the
// current block being added is side chain, do not invalidate the
// mainchain block or any of its components, or update the block_chain
@@ -3611,18 +3702,19 @@ func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) e
if lastIsMainchain != isMainchain {
log.Debugf("Previous block %v is on the main chain, while current "+
"block %v is on a side chain. Not updating main chain parent.",
- lastBlockHash, msgBlock.BlockHash())
+ lastBlockHash, blockHash)
return nil
}
}
// Attempt to find the row id of the block hash in cache.
- lastBlockDbID, ok := pgb.lastBlock[lastBlockHash]
+ lastBlockDbID, ok := pgb.lastBlock[lastBlockHashC]
if !ok {
log.Debugf("The previous block %s for block %s not found in cache, "+
- "looking it up.", lastBlockHash, msgBlock.BlockHash())
+ "looking it up.", lastBlockHash, blockHash)
var err error
- lastBlockDbID, err = pgb.BlockChainDbIDNoCancel(lastBlockHash.String())
+ // lastBlockDbID, err = pgb.BlockChainDbIDNoCancel(lastBlockHash.String())
+ lastBlockDbID, err = pgb.blockChainDbID(pgb.ctx, lastBlockHash)
if err != nil {
return fmt.Errorf("unable to locate block %s in block_chain table: %w",
lastBlockHash, err)
@@ -3630,10 +3722,11 @@ func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) e
}
// Update the previous block's next block hash in the block_chain table.
- err := UpdateBlockNext(pgb.db, lastBlockDbID, msgBlock.BlockHash().String())
+ err := updateBlockNext(pgb.db, lastBlockDbID, blockHash)
if err != nil {
- return fmt.Errorf("UpdateBlockNext: %w", err)
+ return fmt.Errorf("updateBlockNext(%v, %v): %w", lastBlockDbID, blockHash, err)
}
+ // NOTE: UpdateBlockNextByHash would work too and no DbID stuff, but slower.
// If the previous block is invalidated by this one: (1) update it's
// is_valid flag in the blocks table if needed, and (2) flag all the vins,
@@ -3644,14 +3737,14 @@ func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) e
if !lastIsValid {
// Update the is_valid flag in the blocks table.
log.Infof("Previous block %s was DISAPPROVED by stakeholders.", lastBlockHash)
- err := UpdateLastBlockValid(pgb.db, lastBlockDbID, lastIsValid)
+ err := updateLastBlockValid(pgb.db, lastBlockDbID, lastIsValid)
if err != nil {
return fmt.Errorf("UpdateLastBlockValid: %w", err)
}
// For the transactions invalidated by this block, locate any vouts that
// reference them in vouts.spend_tx_row_id, and unset spend_tx_row_id.
- voutsUnset, err := clearVoutRegularSpendTxRowIDs(pgb.db, lastBlockHash.String())
+ voutsUnset, err := clearVoutRegularSpendTxRowIDs(pgb.db, lastBlockHash)
if err != nil {
return fmt.Errorf("clearVoutRegularSpendTxRowIDs: %w", err)
}
@@ -3659,13 +3752,13 @@ func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) e
"regular transactions in invalidated block %s", voutsUnset, lastBlockHash)
// Update the is_valid flag for the last block's vins.
- err = UpdateLastVins(pgb.db, lastBlockHash.String(), lastIsValid, isMainchain)
+ err = updateLastVins(pgb.db, lastBlockHash, lastIsValid, isMainchain)
if err != nil {
return fmt.Errorf("UpdateLastVins: %w", err)
}
// Update the is_valid flag for the last block's regular transactions.
- _, _, err = UpdateTransactionsValid(pgb.db, lastBlockHash.String(), lastIsValid)
+ _, _, err = updateTransactionsValid(pgb.db, lastBlockHash, lastIsValid)
if err != nil {
return fmt.Errorf("UpdateTransactionsValid: %w", err)
}
@@ -3675,7 +3768,7 @@ func (pgb *ChainDB) UpdateLastBlock(msgBlock *wire.MsgBlock, isMainchain bool) e
// Update on addresses (cost=0.00..1012201.53 rows=1 width=181)
// -> Seq Scan on addresses (cost=0.00..1012201.53 rows=1 width=181)
// Filter: ((NOT is_funding) AND (tx_vin_vout_row_id = 13241234))
- addrs, err := UpdateLastAddressesValid(pgb.db, lastBlockHash.String(), lastIsValid)
+ addrs, err := updateLastAddressesValid(pgb.db, lastBlockHash, lastIsValid)
if err != nil {
return fmt.Errorf("UpdateLastAddressesValid: %w", err)
}
@@ -3711,8 +3804,8 @@ func (r *storeTxnsResult) Error() string {
// block's validity, Validators.
type MsgBlockPG struct {
*wire.MsgBlock
- WinningTickets []string
- Validators []string
+ WinningTickets []dbtypes.ChainHash
+ Validators []dbtypes.ChainHash
}
// storeTxns inserts all vins, vouts, and transactions. The VoutDbIds and
@@ -3759,8 +3852,7 @@ func (pgb *ChainDB) storeTxns(txns []*dbtypes.Tx, vouts [][]*dbtypes.Vout, vins
for it, Tx := range txns {
// Insert vouts, and collect AddressRows to add to address table for
// each output.
- Tx.VoutDbIds, dbAddressRows[it], err = InsertVoutsStmt(voutStmt,
- vouts[it], pgb.dupChecks, updateExistingRecords)
+ Tx.VoutDbIds, dbAddressRows[it], err = insertVoutsStmt(voutStmt, vouts[it])
if err != nil && !errors.Is(err, sql.ErrNoRows) {
err = fmt.Errorf("failure in InsertVoutsStmt: %w", err)
_ = dbTx.Rollback()
@@ -3773,8 +3865,7 @@ func (pgb *ChainDB) storeTxns(txns []*dbtypes.Tx, vouts [][]*dbtypes.Vout, vins
}
// Insert vins
- Tx.VinDbIds, err = InsertVinsStmt(vinStmt, vins[it], pgb.dupChecks,
- updateExistingRecords)
+ Tx.VinDbIds, err = insertVinsStmt(vinStmt, vins[it])
if err != nil && !errors.Is(err, sql.ErrNoRows) {
err = fmt.Errorf("failure in InsertVinsStmt: %w", err)
_ = dbTx.Rollback()
@@ -3787,7 +3878,7 @@ func (pgb *ChainDB) storeTxns(txns []*dbtypes.Tx, vouts [][]*dbtypes.Vout, vins
}
// Get the tx PK IDs for storage in the blocks, tickets, and votes table.
- txDbIDs, err = InsertTxnsDbTxn(dbTx, txns, pgb.dupChecks, updateExistingRecords)
+ txDbIDs, err = insertTxnsDbTxn(dbTx, txns, pgb.dupChecks, updateExistingRecords)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
err = fmt.Errorf("failure in InsertTxnsDbTxn: %w", err)
return
@@ -3833,7 +3924,7 @@ txns:
for iv := range dbTxVins[it] {
vin := &dbTxVins[it][iv]
- if txhelpers.IsZeroHashStr(vin.PrevTxHash) {
+ if vin.PrevTxHash.IsZero() {
continue
}
utxo := pgb.utxoCache.Peek(vin.PrevTxHash, vin.PrevTxIndex)
@@ -3915,12 +4006,12 @@ txns:
// For a side chain block, set Validators to an empty slice so that there
// will be no misses even if there are less than 5 votes. Any Validators
- // that do not match a spent ticket hash in InsertVotes are considered
+ // that do not match a spent ticket hash in insertVotes are considered
// misses. By listing no required validators, there are no misses. For side
// chain blocks, this is acceptable and necessary because the misses table
// does not record the block hash or main/side chain status.
if !isMainchain {
- msgBlock.Validators = []string{}
+ msgBlock.Validators = []dbtypes.ChainHash{}
}
// If processing stake transactions, insert tickets, votes, and misses. Also
@@ -3928,10 +4019,10 @@ txns:
// to the new votes, revokes, misses, and expires.
if isStake {
// Tickets: Insert new (unspent) tickets
- newTicketDbIDs, newTicketTx, err := InsertTickets(pgb.db, dbTransactions, txDbIDs,
+ newTicketDbIDs, newTicketTx, err := insertTickets(pgb.db, dbTransactions, txDbIDs,
pgb.dupChecks, updateExistingRecords)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- log.Error("InsertTickets:", err)
+ log.Error("insertTickets:", err)
txRes.err = err
return txRes
}
@@ -3948,20 +4039,20 @@ txns:
// ticket spend info below.
// voteDbIDs, voteTxns, spentTicketHashes, ticketDbIDs, missDbIDs, err := ...
- var missesHashIDs map[string]uint64
- _, _, _, _, missesHashIDs, err = InsertVotes(pgb.db, dbTransactions, txDbIDs,
+ var missesHashIDs map[dbtypes.ChainHash]uint64
+ _, _, _, _, missesHashIDs, err = insertVotes(pgb.db, dbTransactions, txDbIDs,
pgb.unspentTicketCache, msgBlock, pgb.dupChecks, updateExistingRecords,
pgb.chainParams, pgb.ChainInfo())
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- log.Error("InsertVotes:", err)
+ log.Error("insertVotes:", err)
txRes.err = err
return txRes
}
// Treasury txns.
- err = InsertTreasuryTxns(pgb.db, dbTransactions, pgb.dupChecks, updateExistingRecords)
+ err = insertTreasuryTxns(pgb.db, dbTransactions, pgb.dupChecks, updateExistingRecords)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- log.Error("InsertTreasuryTxns:", err)
+ log.Error("insertTreasuryTxns:", err)
txRes.err = err
return txRes
}
@@ -3969,14 +4060,14 @@ txns:
// Get information for transactions spending tickets (votes and
// revokes), and the ticket DB row IDs themselves. Also return tickets
// table row IDs for newly spent tickets, if we are updating them as we
- // go (SetSpendingForTickets). CollectTicketSpendDBInfo uses ChainDB's
+ // go (SetSpendingForTickets). collectTicketSpendDBInfo uses ChainDB's
// ticket DB row ID cache (unspentTicketCache), and immediately expires
// any found entries for a main chain block.
spendingTxDbIDs, spendTypes, spentTicketHashes, ticketDbIDs, err :=
- pgb.CollectTicketSpendDBInfo(dbTransactions, txDbIDs,
+ pgb.collectTicketSpendDBInfo(dbTransactions, txDbIDs,
msgBlock.MsgBlock.STransactions, isMainchain)
if err != nil {
- log.Error("CollectTicketSpendDBInfo:", err)
+ log.Error("collectTicketSpendDBInfo:", err)
txRes.err = err
return txRes
}
@@ -3986,7 +4077,7 @@ txns:
// Classify and record the height of each ticket spend (vote or revoke).
// For revokes, further distinguish miss or expire.
- revokes := make(map[string]uint64)
+ revokes := make(map[dbtypes.ChainHash]uint64)
blockHeights := make([]int64, len(spentTicketHashes))
poolStatuses := make([]dbtypes.TicketPoolStatus, len(spentTicketHashes))
for iv := range spentTicketHashes {
@@ -3999,12 +4090,7 @@ txns:
case dbtypes.TicketRevoked:
revokes[spentTicketHashes[iv]] = ticketDbIDs[iv]
// Revoke reason
- h, err0 := chainhash.NewHashFromStr(spentTicketHashes[iv])
- if err0 != nil {
- log.Errorf("Invalid hash %v", spentTicketHashes[iv])
- continue // no info about spent ticket!
- }
- expired := pgb.stakeDB.BestNode.ExistsExpiredTicket(*h)
+ expired := pgb.stakeDB.BestNode.ExistsExpiredTicket(chainhash.Hash(spentTicketHashes[iv]))
if !expired {
poolStatuses[iv] = dbtypes.PoolStatusMissed
} else {
@@ -4014,7 +4100,7 @@ txns:
}
// Update tickets table with spending info.
- _, err = SetSpendingForTickets(pgb.db, ticketDbIDs, spendingTxDbIDs,
+ _, err = setSpendingForTickets(pgb.db, ticketDbIDs, spendingTxDbIDs,
blockHeights, spendTypes, poolStatuses)
if err != nil {
log.Error("SetSpendingForTickets:", err)
@@ -4023,9 +4109,9 @@ txns:
// Unspent not-live tickets are also either expired or missed.
// Missed but not revoked
- var unspentMissedTicketHashes []string
+ var unspentMissedTicketHashes []dbtypes.ChainHash
var missStatuses []dbtypes.TicketPoolStatus
- unspentMisses := make(map[string]struct{})
+ unspentMisses := make(map[dbtypes.ChainHash]struct{})
// missesHashIDs refers to lottery winners that did not vote.
for miss := range missesHashIDs {
if _, ok := revokes[miss]; !ok {
@@ -4037,7 +4123,7 @@ txns:
}
// Expired but not revoked
- unspentEnM := make([]string, len(unspentMissedTicketHashes))
+ unspentEnM := make([]dbtypes.ChainHash, len(unspentMissedTicketHashes))
// Start with the unspent misses and append unspent expires to get
// "unspent expired and missed".
copy(unspentEnM, unspentMissedTicketHashes)
@@ -4048,7 +4134,7 @@ txns:
// and not the revoked ones. Screen each ticket from MissedByBlock
// for the actual unspent expires.
if pgb.stakeDB.BestNode.ExistsExpiredTicket(missHash) {
- emHash := missHash.String()
+ emHash := dbtypes.ChainHash(missHash)
// Next check should not be unnecessary. Make sure not in
// unspent misses from above, and not just revoked.
_, justMissed := unspentMisses[emHash] // should be redundant
@@ -4077,7 +4163,7 @@ txns:
}
// Update status of the unspent expired and missed tickets.
- _, err = SetPoolStatusForTickets(pgb.db,
+ _, err = setPoolStatusForTickets(pgb.db,
unspentEnMRowIDs, missStatuses)
if err != nil {
log.Errorf("SetPoolStatusForTicketsByHash: %v", err)
@@ -4099,7 +4185,7 @@ txns:
// Insert each new funding AddressRow, absent MatchingTxHash (spending txn
// since these new address rows are *funding*).
- _, err = InsertAddressRowsDbTx(dbTx, dbAddressRowsFlat, pgb.dupChecks, updateExistingRecords)
+ _, err = insertAddressRowsDbTx(dbTx, dbAddressRowsFlat, pgb.dupChecks, updateExistingRecords)
if err != nil {
_ = dbTx.Rollback()
log.Error("InsertAddressRows:", err)
@@ -4128,7 +4214,7 @@ txns:
// Skip coinbase inputs (they are new coins and thus have no
// previous outpoint funding them).
- if bytes.Equal(zeroHashStringBytes, []byte(vin.PrevTxHash)) {
+ if vin.PrevTxHash.IsZero() {
continue
}
@@ -4207,13 +4293,13 @@ txns:
continue
}
for _, red := range swapTxns.Redemptions {
- err = InsertSwap(pgb.db, height, red)
+ err = insertSwap(pgb.db, height, red)
if err != nil {
log.Errorf("InsertSwap: %v", err)
}
}
for _, ref := range swapTxns.Refunds {
- err = InsertSwap(pgb.db, height, ref)
+ err = insertSwap(pgb.db, height, ref)
if err != nil {
log.Errorf("InsertSwap: %v", err)
}
@@ -4309,12 +4395,12 @@ func (pgb *ChainDB) flattenAddressRows(dbAddressRows [][]dbtypes.AddressRow, txn
return dbAddressRowsFlat
}
-// CollectTicketSpendDBInfo processes the stake transactions in msgBlock, which
+// collectTicketSpendDBInfo processes the stake transactions in msgBlock, which
// correspond to the transaction data in dbTxns, and extracts data for votes and
// revokes, including the spent ticket hash and DB row ID.
-func (pgb *ChainDB) CollectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uint64,
+func (pgb *ChainDB) collectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uint64,
msgTxns []*wire.MsgTx, isMainchain bool) (spendingTxDbIDs []uint64, spendTypes []dbtypes.TicketSpendType,
- ticketHashes []string, ticketDbIDs []uint64, err error) {
+ ticketHashes []dbtypes.ChainHash, ticketDbIDs []uint64, err error) {
// This only makes sense for stake transactions. Check that the number of
// dbTxns equals the number of STransactions in msgBlock.
// msgTxns := msgBlock.STransactions
@@ -4340,7 +4426,7 @@ func (pgb *ChainDB) CollectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uin
// Ensure the transactions in dbTxns and msgBlock.STransactions correspond.
msgTx := msgTxns[i]
- if tx.TxID != msgTx.CachedTxHash().String() {
+ if tx.TxID != dbtypes.ChainHash(*msgTx.CachedTxHash()) {
err = fmt.Errorf("txid of dbtypes.Tx does not match that of msgTx")
return
} // comment this check
@@ -4356,7 +4442,7 @@ func (pgb *ChainDB) CollectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uin
spendingTxDbIDs = append(spendingTxDbIDs, txDbIDs[i])
// ticket hash
- ticketHash := msgTx.TxIn[stakeSubmissionVinInd].PreviousOutPoint.Hash.String()
+ ticketHash := dbtypes.ChainHash(msgTx.TxIn[stakeSubmissionVinInd].PreviousOutPoint.Hash)
ticketHashes = append(ticketHashes, ticketHash)
// ticket's row ID in *tickets* table
@@ -4371,7 +4457,7 @@ func (pgb *ChainDB) CollectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uin
return
}
-// UpdateSpendingInfoInAllAddresses completely rebuilds the matching transaction
+// updateSpendingInfoInAllAddresses completely rebuilds the matching transaction
// columns for funding rows of the addresses table. This is intended to be use
// after syncing all other tables and creating their indexes, particularly the
// indexes on the vins table, and the addresses table index on the funding tx
@@ -4379,10 +4465,10 @@ func (pgb *ChainDB) CollectTicketSpendDBInfo(dbTxns []*dbtypes.Tx, txDbIDs []uin
// with storeBlockTxnTree, which will update these addresses table columns too,
// but much more slowly for a number of reasons (that are well worth
// investigating BTW!).
-func (pgb *ChainDB) UpdateSpendingInfoInAllAddresses(barLoad chan *dbtypes.ProgressBarLoad) (int64, error) {
+func (pgb *ChainDB) updateSpendingInfoInAllAddresses(barLoad chan *dbtypes.ProgressBarLoad) (int64, error) {
heightDB, err := pgb.HeightDB()
if err != nil {
- return 0, fmt.Errorf("DBBestBlock: %w", err)
+ return 0, fmt.Errorf("dbBestBlock: %w", err)
}
tStart := time.Now()
@@ -4434,6 +4520,7 @@ func (pgb *ChainDB) UpdateSpendingInfoInAllAddresses(barLoad chan *dbtypes.Progr
// UpdateSpendingInfoInAllTickets reviews all votes and revokes and sets this
// spending info in the tickets table.
+/*
func (pgb *ChainDB) UpdateSpendingInfoInAllTickets() (int64, error) {
// The queries in this function should not timeout or (probably) canceled,
// so use a background context.
@@ -4441,9 +4528,9 @@ func (pgb *ChainDB) UpdateSpendingInfoInAllTickets() (int64, error) {
// Get the full list of votes (DB IDs and heights), and spent ticket hashes
allVotesDbIDs, allVotesHeights, ticketDbIDs, err :=
- RetrieveAllVotesDbIDsHeightsTicketDbIDs(ctx, pgb.db)
+ retrieveAllVotesDbIDsHeightsTicketDbIDs(ctx, pgb.db)
if err != nil {
- log.Errorf("RetrieveAllVotesDbIDsHeightsTicketDbIDs: %v", err)
+ log.Errorf("retrieveAllVotesDbIDsHeightsTicketDbIDs: %v", err)
return 0, err
}
@@ -4465,24 +4552,24 @@ func (pgb *ChainDB) UpdateSpendingInfoInAllTickets() (int64, error) {
// Revokes
- revokeIDs, _, revokeHeights, vinDbIDs, err := RetrieveAllRevokes(ctx, pgb.db)
+ revokeIDs, _, revokeHeights, vinDbIDs, err := retrieveAllRevokes(ctx, pgb.db)
if err != nil {
- log.Errorf("RetrieveAllRevokes: %v", err)
+ log.Errorf("retrieveAllRevokes: %v", err)
return 0, err
}
revokedTicketHashes := make([]string, len(vinDbIDs))
for i, vinDbID := range vinDbIDs {
- revokedTicketHashes[i], err = RetrieveFundingTxByVinDbID(ctx, pgb.db, vinDbID)
+ revokedTicketHashes[i], err = retrieveFundingTxByVinDbID(ctx, pgb.db, vinDbID)
if err != nil {
- log.Errorf("RetrieveFundingTxByVinDbID: %v", err)
+ log.Errorf("retrieveFundingTxByVinDbID: %v", err)
return 0, err
}
}
- revokedTicketDbIDs, err := RetrieveTicketIDsByHashes(ctx, pgb.db, revokedTicketHashes)
+ revokedTicketDbIDs, err := retrieveTicketIDsByHashes(ctx, pgb.db, revokedTicketHashes)
if err != nil {
- log.Errorf("RetrieveTicketIDsByHashes: %v", err)
+ log.Errorf("retrieveTicketIDsByHashes: %v", err)
return 0, err
}
@@ -4521,6 +4608,7 @@ func ticketpoolStatusSlice(ss dbtypes.TicketPoolStatus, N int) []dbtypes.TicketP
}
return S
}
+*/
// GetChainWork fetches the chainjson.BlockHeaderVerbose and returns only the
// ChainWork attribute as a hex-encoded string, without 0x prefix.
@@ -4529,12 +4617,14 @@ func (pgb *ChainDB) GetChainWork(hash *chainhash.Hash) (string, error) {
}
// GenesisStamp returns the stamp of the lowest mainchain block in the database.
+/*
func (pgb *ChainDB) GenesisStamp() int64 {
tDef := dbtypes.NewTimeDefFromUNIX(0)
// Ignoring error and returning zero time.
_ = pgb.db.QueryRowContext(pgb.ctx, internal.SelectGenesisTime).Scan(&tDef)
return tDef.T.Unix()
}
+*/
// GetStakeInfoExtendedByHash fetches a apitypes.StakeInfoExtended, containing
// comprehensive data for the state of staking at a given block.
@@ -4554,13 +4644,17 @@ func (pgb *ChainDB) GetStakeInfoExtendedByHash(hashStr string) *apitypes.StakeIn
height := msgBlock.Header.Height
var size, val int64
- var winners []string
+ var winners dbtypes.ChainHashArray
err = pgb.db.QueryRowContext(pgb.ctx, internal.SelectPoolInfo,
- hashStr).Scan(pq.Array(&winners), &val, &size)
+ (*dbtypes.ChainHash)(hash)).Scan(&winners, &val, &size)
if err != nil {
log.Errorf("Error retrieving mainchain block with stats for hash %s: %v", hashStr, err)
return nil
}
+ winnersStr := make([]string, len(winners))
+ for i := range winners {
+ winnersStr[i] = winners[i].String()
+ }
coin := dcrutil.Amount(val).ToCoin()
tpi := &apitypes.TicketPoolInfo{
@@ -4568,7 +4662,7 @@ func (pgb *ChainDB) GetStakeInfoExtendedByHash(hashStr string) *apitypes.StakeIn
Size: uint32(size),
Value: coin,
ValAvg: coin / float64(size),
- Winners: winners,
+ Winners: winnersStr,
}
windowSize := uint32(pgb.chainParams.StakeDiffWindowSize)
@@ -4597,7 +4691,7 @@ func (pgb *ChainDB) GetStakeInfoExtendedByHeight(height int) *apitypes.StakeInfo
// GetPoolInfo retrieves the ticket pool statistics at the specified height.
func (pgb *ChainDB) GetPoolInfo(idx int) *apitypes.TicketPoolInfo {
- ticketPoolInfo, err := RetrievePoolInfo(pgb.ctx, pgb.db, int64(idx))
+ ticketPoolInfo, err := retrievePoolInfo(pgb.ctx, pgb.db, int64(idx))
if err != nil {
log.Errorf("Unable to retrieve ticket pool info: %v", err)
return nil
@@ -4607,14 +4701,16 @@ func (pgb *ChainDB) GetPoolInfo(idx int) *apitypes.TicketPoolInfo {
// GetPoolInfoByHash retrieves the ticket pool statistics at the specified block
// hash.
+/*
func (pgb *ChainDB) GetPoolInfoByHash(hash string) *apitypes.TicketPoolInfo {
- ticketPoolInfo, err := RetrievePoolInfoByHash(pgb.ctx, pgb.db, hash)
+ ticketPoolInfo, err := retrievePoolInfoByHash(pgb.ctx, pgb.db, hash)
if err != nil {
log.Errorf("Unable to retrieve ticket pool info: %v", err)
return nil
}
return ticketPoolInfo
}
+*/
// GetPoolInfoRange retrieves the ticket pool statistics for a range of block
// heights, as a slice.
@@ -4626,7 +4722,7 @@ func (pgb *ChainDB) GetPoolInfoRange(idx0, idx1 int) []apitypes.TicketPoolInfo {
log.Errorf("Unable to retrieve ticket pool info for range [%d, %d], tip=%d", idx0, idx1, tip)
return nil
}
- ticketPoolInfos, _, err := RetrievePoolInfoRange(pgb.ctx, pgb.db, ind0, ind1)
+ ticketPoolInfos, _, err := retrievePoolInfoRange(pgb.ctx, pgb.db, ind0, ind1)
if err != nil {
log.Errorf("Unable to retrieve ticket pool info range: %v", err)
return nil
@@ -4644,7 +4740,7 @@ func (pgb *ChainDB) GetPoolValAndSizeRange(idx0, idx1 int) ([]float64, []uint32)
log.Errorf("Unable to retrieve ticket pool info for range [%d, %d], tip=%d", idx0, idx1, tip)
return nil, nil
}
- poolvals, poolsizes, err := RetrievePoolValAndSizeRange(pgb.ctx, pgb.db, ind0, ind1)
+ poolvals, poolsizes, err := retrievePoolValAndSizeRange(pgb.ctx, pgb.db, ind0, ind1)
if err != nil {
log.Errorf("Unable to retrieve ticket value and size range: %v", err)
return nil, nil
@@ -4663,18 +4759,14 @@ func (pgb *ChainDB) ChargePoolInfoCache(startHeight int64) error {
log.Debug("No pool info to load into cache")
return nil
}
- tpis, blockHashes, err := RetrievePoolInfoRange(pgb.ctx, pgb.db, startHeight, endHeight)
+ tpis, blockHashes, err := retrievePoolInfoRange(pgb.ctx, pgb.db, startHeight, endHeight)
if err != nil {
return err
}
log.Debugf("Pre-loading pool info for %d blocks ([%d, %d]) into cache.",
len(tpis), startHeight, endHeight)
for i := range tpis {
- hash, err := chainhash.NewHashFromStr(blockHashes[i])
- if err != nil {
- log.Warnf("Invalid block hash: %s", blockHashes[i])
- }
- pgb.stakeDB.SetPoolInfo(*hash, &tpis[i])
+ pgb.stakeDB.SetPoolInfo(chainhash.Hash(blockHashes[i]), &tpis[i])
}
return nil
}
@@ -4812,7 +4904,7 @@ func (pgb *ChainDB) GetBlockByHash(hash string) (*wire.MsgBlock, error) {
log.Errorf("Invalid block hash %s", hash)
return nil, err
}
- return pgb.Client.GetBlock(context.TODO(), blockHash)
+ return pgb.Client.GetBlock(pgb.ctx, blockHash)
}
// GetHeader fetches the *chainjson.GetBlockHeaderVerboseResult for a given
@@ -4829,18 +4921,18 @@ func (pgb *ChainDB) GetBlockHeaderByHash(hash string) (*wire.BlockHeader, error)
log.Errorf("Invalid block hash %s", hash)
return nil, err
}
- return pgb.Client.GetBlockHeader(context.TODO(), blockHash)
+ return pgb.Client.GetBlockHeader(pgb.ctx, blockHash)
}
// GetBlockHeight returns the height of the block with the specified hash.
func (pgb *ChainDB) GetBlockHeight(hash string) (int64, error) {
- // _, err := chainhash.NewHashFromStr(hash)
- // if err != nil {
- // return -1, err
- // }
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return 0, err
+ }
ctx, cancel := context.WithTimeout(pgb.ctx, pgb.queryTimeout)
defer cancel()
- height, err := RetrieveBlockHeight(ctx, pgb.db, hash)
+ height, err := retrieveBlockHeight(ctx, pgb.db, ch)
if err != nil {
if !errors.Is(err, sql.ErrNoRows) {
log.Errorf("Unexpected error retrieving block height for hash %s: %v", hash, err)
@@ -4941,7 +5033,7 @@ func (pgb *ChainDB) GetTrimmedTransaction(txid *chainhash.Hash) *apitypes.Trimme
// the Choices field of VoteInfo may be a nil slice even if the votebits were
// set for a previously-valid agenda.
func (pgb *ChainDB) GetVoteInfo(txhash *chainhash.Hash) (*apitypes.VoteInfo, error) {
- tx, err := pgb.Client.GetRawTransaction(context.TODO(), txhash)
+ tx, err := pgb.Client.GetRawTransaction(pgb.ctx, txhash)
if err != nil {
log.Errorf("GetRawTransaction failed for: %v", txhash)
return nil, nil
@@ -4967,14 +5059,14 @@ func (pgb *ChainDB) GetVoteInfo(txhash *chainhash.Hash) (*apitypes.VoteInfo, err
// GetVoteVersionInfo requests stake version info from the dcrd RPC server
func (pgb *ChainDB) GetVoteVersionInfo(ver uint32) (*chainjson.GetVoteInfoResult, error) {
- return pgb.Client.GetVoteInfo(context.TODO(), ver)
+ return pgb.Client.GetVoteInfo(pgb.ctx, ver)
}
// GetStakeVersions requests the output of the getstakeversions RPC, which gets
// stake version information and individual vote version information starting at the
// given block and for count-1 blocks prior.
func (pgb *ChainDB) GetStakeVersions(blockHash string, count int32) (*chainjson.GetStakeVersionsResult, error) {
- return pgb.Client.GetStakeVersions(context.TODO(), blockHash, count)
+ return pgb.Client.GetStakeVersions(pgb.ctx, blockHash, count)
}
// GetStakeVersionsLatest requests the output of the getstakeversions RPC for
@@ -4992,7 +5084,7 @@ func (pgb *ChainDB) GetStakeVersionsLatest() (*chainjson.StakeVersions, error) {
// GetAllTxIn gets all transaction inputs, as a slice of *apitypes.TxIn, for a
// given transaction ID.
func (pgb *ChainDB) GetAllTxIn(txid *chainhash.Hash) []*apitypes.TxIn {
- tx, err := pgb.Client.GetRawTransaction(context.TODO(), txid)
+ tx, err := pgb.Client.GetRawTransaction(pgb.ctx, txid)
if err != nil {
log.Errorf("Unknown transaction %s", txid)
return nil
@@ -5024,7 +5116,7 @@ func (pgb *ChainDB) GetAllTxIn(txid *chainhash.Hash) []*apitypes.TxIn {
func (pgb *ChainDB) GetAllTxOut(txid *chainhash.Hash) []*apitypes.TxOut {
// Get the TxRawResult since it provides Asm and CommitAmt for all the
// output scripts, but we could extract that info too.
- tx, err := pgb.Client.GetRawTransactionVerbose(context.TODO(), txid)
+ tx, err := pgb.Client.GetRawTransactionVerbose(pgb.ctx, txid)
if err != nil {
log.Warnf("Unknown transaction %s", txid)
return nil
@@ -5107,7 +5199,7 @@ func (pgb *ChainDB) BlockSummary(ind int64) (*apitypes.BlockDataBasic, error) {
// Cache miss necessitates a DB query.
}
- bd, err := RetrieveBlockSummary(pgb.ctx, pgb.db, ind)
+ bd, err := retrieveBlockSummary(pgb.ctx, pgb.db, ind)
if err != nil {
return nil, err
}
@@ -5138,7 +5230,7 @@ func (pgb *ChainDB) GetSummaryRange(idx0, idx1 int) []*apitypes.BlockDataBasic {
// BlockSummaryRange returns the *apitypes.BlockDataBasic for a range of block
// height.
func (pgb *ChainDB) BlockSummaryRange(idx0, idx1 int64) ([]*apitypes.BlockDataBasic, error) {
- return RetrieveBlockSummaryRange(pgb.ctx, pgb.db, idx0, idx1)
+ return retrieveBlockSummaryRange(pgb.ctx, pgb.db, idx0, idx1)
}
// GetSummaryRangeStepped returns the []*apitypes.BlockDataBasic for a given
@@ -5156,7 +5248,7 @@ func (pgb *ChainDB) GetSummaryRangeStepped(idx0, idx1, step int) []*apitypes.Blo
// BlockSummaryRangeStepped returns the []*apitypes.BlockDataBasic for every
// step'th block in a specified range.
func (pgb *ChainDB) BlockSummaryRangeStepped(idx0, idx1, step int64) ([]*apitypes.BlockDataBasic, error) {
- return RetrieveBlockSummaryRangeStepped(pgb.ctx, pgb.db, idx0, idx1, step)
+ return retrieveBlockSummaryRangeStepped(pgb.ctx, pgb.db, idx0, idx1, step)
}
// GetSummaryByHash returns a *apitypes.BlockDataBasic for a given hex-encoded
@@ -5225,7 +5317,11 @@ func (pgb *ChainDB) BlockSummaryByHash(hash string) (*apitypes.BlockDataBasic, e
// Cache miss necessitates a DB query.
}
- bd, err := RetrieveBlockSummaryByHash(pgb.ctx, pgb.db, hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ return nil, err
+ }
+ bd, err := retrieveBlockSummaryByHash(pgb.ctx, pgb.db, ch)
if err != nil {
return nil, err
}
@@ -5308,7 +5404,7 @@ func (pgb *ChainDB) BlockSize(ind int64) (int32, error) {
ind, tip)
}
- return RetrieveBlockSize(pgb.ctx, pgb.db, ind)
+ return retrieveBlockSize(pgb.ctx, pgb.db, ind)
}
// BlockSizeRange returns an array of block sizes for block range ind0 to ind1
@@ -5318,12 +5414,12 @@ func (pgb *ChainDB) BlockSizeRange(ind0, ind1 int64) ([]int32, error) {
return nil, fmt.Errorf("Cannot retrieve block size range [%d,%d], have height %d",
ind0, ind1, tip)
}
- return RetrieveBlockSizeRange(pgb.ctx, pgb.db, ind0, ind1)
+ return retrieveBlockSizeRange(pgb.ctx, pgb.db, ind0, ind1)
}
// GetSDiff gets the stake difficulty in DCR for a given block height.
func (pgb *ChainDB) GetSDiff(idx int) float64 {
- sdiff, err := RetrieveSDiff(pgb.ctx, pgb.db, int64(idx))
+ sdiff, err := retrieveSDiff(pgb.ctx, pgb.db, int64(idx))
if err != nil {
log.Errorf("Unable to retrieve stake difficulty: %v", err)
return -1
@@ -5333,7 +5429,12 @@ func (pgb *ChainDB) GetSDiff(idx int) float64 {
// GetSBitsByHash gets the stake difficulty in DCR for a given block height.
func (pgb *ChainDB) GetSBitsByHash(hash string) int64 {
- sbits, err := RetrieveSBitsByHash(pgb.ctx, pgb.db, hash)
+ ch, err := chainHashFromStr(hash)
+ if err != nil {
+ log.Errorf("invalid hash %s", hash)
+ return 0
+ }
+ sbits, err := retrieveSBitsByHash(pgb.ctx, pgb.db, ch)
if err != nil {
log.Errorf("Unable to retrieve stake difficulty: %v", err)
return -1
@@ -5359,7 +5460,7 @@ func (pgb *ChainDB) SDiffRange(ind0, ind1 int64) ([]float64, error) {
return nil, fmt.Errorf("Cannot retrieve sdiff range [%d,%d], have height %d",
ind0, ind1, tip)
}
- return RetrieveSDiffRange(pgb.ctx, pgb.db, ind0, ind1)
+ return retrieveSDiffRange(pgb.ctx, pgb.db, ind0, ind1)
}
// GetMempoolSSTxSummary returns the current *apitypes.MempoolTicketFeeInfo.
@@ -5523,17 +5624,18 @@ func (pgb *ChainDB) GetAddressTransactionsRawWithSkip(addr string, count, skip i
*apitypes.VinShort
idx int32
}
- vins := make(map[string][]*vinIndexed)
+ vins := make(map[dbtypes.ChainHash][]*vinIndexed)
for rows.Next() {
- var txid string // redeeming tx
+ var txid, vinTxID dbtypes.ChainHash // redeeming tx
var idx int32
var vin apitypes.VinShort
var val int64
- if err = rows.Scan(&txid, &idx, &vin.Txid, &vin.Vout, &vin.Tree, &val,
+ if err = rows.Scan(&txid, &idx, &vinTxID, &vin.Vout, &vin.Tree, &val,
&vin.BlockHeight, &vin.BlockIndex); err != nil {
log.Errorf("GetAddressTransactionsRawWithSkip: SelectVinsForAddress %s: %v", addr, err)
return nil
}
+ vin.Txid = vinTxID.String()
if val > 0 {
vin.AmountIn = dcrutil.Amount(val).ToCoin()
@@ -5550,24 +5652,27 @@ func (pgb *ChainDB) GetAddressTransactionsRawWithSkip(addr string, count, skip i
}
defer rows.Close()
- txns := make(map[string]*apitypes.AddressTxRaw)
+ txns := make(map[dbtypes.ChainHash]*apitypes.AddressTxRaw)
for rows.Next() {
var tx apitypes.AddressTxRaw
var blockHeight int64
var numVins, numVouts int32
+ var txid, blockHash dbtypes.ChainHash
// var vinDbIDs, voutDbIDs pq.Int64Array
- if err = rows.Scan(&tx.TxID, &tx.BlockHash, &blockHeight,
+ if err = rows.Scan(&txid, &blockHash, &blockHeight,
&tx.Time.S, &tx.Version, &tx.Locktime, &tx.Size, &tx.Type, &numVins, &numVouts /*, &vinDbIDs, &voutDbIDs*/); err != nil {
log.Errorf("GetAddressTransactionsRawWithSkip: Scan %s: %v", addr, err)
return nil
}
+ tx.TxID = txid.String()
+ tx.BlockHash = blockHash.String()
tx.Blocktime = &tx.Time
tx.Confirmations = height - blockHeight + 1
- txns[tx.TxID] = &tx
+ txns[txid] = &tx
- txVins, found := vins[tx.TxID]
+ txVins, found := vins[txid]
if found {
sort.Slice(txVins, func(i, j int) bool {
return txVins[i].idx < txVins[j].idx
@@ -5606,11 +5711,10 @@ func (pgb *ChainDB) GetAddressTransactionsRawWithSkip(addr string, count, skip i
defer rows.Close()
for rows.Next() {
- var txid string // funding tx
+ var txid dbtypes.ChainHash // funding tx
var vout apitypes.Vout
var val int64
- var pkScript []byte
- if err = rows.Scan(&val, &txid, &vout.N, &vout.Version, &pkScript); err != nil {
+ if err = rows.Scan(&val, &txid, &vout.N, &vout.Version); err != nil {
log.Errorf("GetAddressTransactionsRawWithSkip: SelectVoutsForAddress %s: %v", addr, err)
return nil
}
@@ -5625,11 +5729,29 @@ func (pgb *ChainDB) GetAddressTransactionsRawWithSkip(addr string, count, skip i
continue
}
- isTicketCommit := stake.TxType(tx.Type) == stake.TxTypeSStx && (vout.N%2 != 0)
- vout.ScriptPubKeyDecoded = decPkScript(vout.Version, pkScript, isTicketCommit, pgb.chainParams)
+ // isTicketCommit := stake.TxType(tx.Type) == stake.TxTypeSStx && (vout.N%2 != 0)
+ // vout.ScriptPubKeyDecoded = decPkScript(vout.Version, pkScript, isTicketCommit, pgb.chainParams)
tx.Vout = append(tx.Vout, vout)
}
+ // Get pkscripts that db doesn't have.
+ for hash, atx := range txns {
+ ch := chainhash.Hash(hash)
+ rawTx, err := pgb.Client.GetRawTransaction(ctx, &ch)
+ if err != nil {
+ log.Errorf("GetRawTransaction(%v): %v", ch, err)
+ continue
+ }
+ for i, txOut := range rawTx.MsgTx().TxOut {
+ if i >= len(atx.Vout) {
+ log.Errorf("too many outputs in raw tx (%d) compared to DB (%d)", len(rawTx.MsgTx().TxOut), len(atx.Vout))
+ break
+ }
+ isTicketCommit := stake.TxType(atx.Type) == stake.TxTypeSStx && (i%2 != 0)
+ atx.Vout[i].ScriptPubKeyDecoded = decPkScript(txOut.Version, txOut.PkScript, isTicketCommit, pgb.chainParams)
+ }
+ }
+
for _, tx := range txs {
sort.Slice(tx.Vout, func(i, j int) bool {
return tx.Vout[i].N < tx.Vout[j].N
@@ -5652,9 +5774,8 @@ func (pgb *ChainDB) GetChainParams() *chaincfg.Params {
// GetBlockVerbose fetches the *chainjson.GetBlockVerboseResult for a given
// block height. Optionally include verbose transactions.
-func (pgb *ChainDB) GetBlockVerbose(idx int, verboseTx bool) *chainjson.GetBlockVerboseResult {
- block := rpcutils.GetBlockVerbose(pgb.Client, int64(idx), verboseTx)
- return block
+func (pgb *ChainDB) getBlockVerbose(idx int, verboseTx bool) *chainjson.GetBlockVerboseResult {
+ return rpcutils.GetBlockVerbose(pgb.Client, int64(idx), verboseTx)
}
func sumOutsTxRawResult(txs []chainjson.TxRawResult) (sum float64) {
@@ -5973,7 +6094,7 @@ func (pgb *ChainDB) GetExplorerBlocks(start int, end int) []*exptypes.BlockBasic
}
summaries := make([]*exptypes.BlockBasic, 0, start-end)
for i := start; i > end; i-- {
- data := pgb.GetBlockVerbose(i, true)
+ data := pgb.getBlockVerbose(i, true)
block := new(exptypes.BlockBasic)
if data != nil {
block = makeExplorerBlockBasic(data, pgb.chainParams)
@@ -6053,7 +6174,7 @@ func (pgb *ChainDB) GetExplorerTx(txid string) *exptypes.TxInfo {
return nil
}
- txBasic, txType := makeExplorerTxBasic(txraw, ticketPrice, msgTx, pgb.chainParams)
+ txBasic, _ /* txType */ := makeExplorerTxBasic(txraw, ticketPrice, msgTx, pgb.chainParams)
tx := &exptypes.TxInfo{
TxBasic: txBasic,
BlockHeight: txraw.BlockHeight,
@@ -6063,8 +6184,6 @@ func (pgb *ChainDB) GetExplorerTx(txid string) *exptypes.TxInfo {
Time: exptypes.NewTimeDefFromUNIX(txraw.Time),
}
- // tree := txType stake.TxTypeRegular
-
inputs := make([]exptypes.Vin, 0, len(txraw.Vin))
for i := range txraw.Vin {
vin := &txraw.Vin[i]
@@ -6106,7 +6225,7 @@ func (pgb *ChainDB) GetExplorerTx(txid string) *exptypes.TxInfo {
} else {
// Alt. DB-powered height lookup:
// vin.BlockHeight = uint32(pgb.TxHeight(vinHash))
- prevTx, err := pgb.Client.GetRawTransactionVerbose(context.TODO(), vinHash)
+ prevTx, err := pgb.Client.GetRawTransactionVerbose(pgb.ctx, vinHash)
if err == nil {
vin.BlockHeight = uint32(prevTx.BlockHeight)
} else {
@@ -6267,43 +6386,67 @@ func (pgb *ChainDB) GetExplorerTx(txid string) *exptypes.TxInfo {
}
}
- tree := wire.TxTreeStake
- if txType == stake.TxTypeRegular {
- tree = wire.TxTreeRegular
- }
+ // tree := wire.TxTreeStake
+ // if txType == stake.TxTypeRegular {
+ // tree = wire.TxTreeRegular
+ // }
CoinbaseMaturityInHours := (pgb.chainParams.TargetTimePerBlock.Hours() * float64(pgb.chainParams.CoinbaseMaturity))
tx.MaturityTimeTill = ((float64(pgb.chainParams.CoinbaseMaturity) -
float64(tx.Confirmations)) / float64(pgb.chainParams.CoinbaseMaturity)) * CoinbaseMaturityInHours
outputs := make([]exptypes.Vout, 0, len(txraw.Vout))
- for i, vout := range txraw.Vout {
- // Determine spent status with gettxout, including mempool.
- txout, err := pgb.Client.GetTxOut(context.TODO(), txhash, uint32(i), tree, true)
- if err != nil {
- log.Warnf("Failed to determine if tx out is spent for output %d of tx %s: %v", i, txid, err)
+ for i := range txraw.Vout {
+ vout := &txraw.Vout[i]
+
+ // If uncertain (this tx is unconfirmed or the spender is unconfirmed),
+ // go with spent == false, since caller should check for unconfirmed
+ // spenders.
+ var spent bool
+ if txraw.Confirmations > 0 {
+ utxoData := pgb.utxoCache.Peek(dbtypes.ChainHash(*txhash), uint32(i))
+ spent = utxoData == nil // if true, definitely spent since this tx is confirmed; if false, spender might be unconfirmed!
+ }
+ // txout, err := pgb.Client.GetTxOut(pgb.ctx, txhash, uint32(i), tree, true)
+ // if err != nil {
+ // log.Warnf("Failed to determine if tx out is spent for output %d of tx %s: %v", i, txid, err)
+ // }
+ // gettxout never indicates spent if the spending tx is unconfirmed,
+ // which is also necessarily the case if this tx is unconfirmed. Caller
+ // has to check mempool for that, so we will leave Spent false.
+
+ /* this tx is unconfirmed, gettxout won't ever say if the output is spent
+ else {
+ txout, err := pgb.Client.GetTxOut(pgb.ctx, txhash, uint32(i), tree, true)
+ if err != nil {
+ log.Warnf("Failed to determine if tx out is spent for output %d of tx %s: %v", i, txid, err)
+ }
+ spent = txout == nil
}
+ */
+
+ spk := &vout.ScriptPubKey
var opReturn string
var opTAdd bool
- if strings.HasPrefix(vout.ScriptPubKey.Asm, "OP_RETURN") {
- opReturn = vout.ScriptPubKey.Asm
+ if strings.HasPrefix(spk.Asm, "OP_RETURN") {
+ opReturn = spk.Asm
} else {
- opTAdd = strings.HasPrefix(vout.ScriptPubKey.Asm, "OP_TADD")
+ opTAdd = strings.HasPrefix(spk.Asm, "OP_TADD")
}
// Get a consistent script class string from dbtypes.ScriptClass.
pkScript, version := msgTx.TxOut[i].PkScript, msgTx.TxOut[i].Version
scriptClass := dbtypes.NewScriptClass(stdscript.DetermineScriptType(version, pkScript))
- if scriptClass == dbtypes.SCNullData && vout.ScriptPubKey.CommitAmt != nil {
+ if scriptClass == dbtypes.SCNullData && spk.CommitAmt != nil {
scriptClass = dbtypes.SCStakeSubCommit
}
outputs = append(outputs, exptypes.Vout{
- Addresses: vout.ScriptPubKey.Addresses,
+ Addresses: spk.Addresses,
Amount: vout.Value,
FormattedAmount: humanize.Commaf(vout.Value),
OP_RETURN: opReturn,
OP_TADD: opTAdd,
Type: scriptClass.String(),
- Spent: txout == nil,
+ Spent: spent,
Index: vout.N,
Version: version,
})
@@ -6346,7 +6489,7 @@ func (pgb *ChainDB) getTip() (*apitypes.BlockDataBasic, error) {
if pgb.tipSummary != nil && pgb.tipSummary.Hash == pgb.BestBlockHashStr() {
return pgb.tipSummary, nil
}
- tip, err := RetrieveLatestBlockSummary(pgb.ctx, pgb.db)
+ tip, err := retrieveLatestBlockSummary(pgb.ctx, pgb.db)
if err != nil {
return nil, err
}
@@ -6362,7 +6505,7 @@ func (pgb *ChainDB) DecodeRawTransaction(txhex string) (*chainjson.TxRawResult,
log.Errorf("DecodeRawTransaction failed: %v", err)
return nil, err
}
- tx, err := pgb.Client.DecodeRawTransaction(context.TODO(), bytes)
+ tx, err := pgb.Client.DecodeRawTransaction(pgb.ctx, bytes)
if err != nil {
log.Errorf("DecodeRawTransaction failed: %v", err)
return nil, err
@@ -6373,19 +6516,13 @@ func (pgb *ChainDB) DecodeRawTransaction(txhex string) (*chainjson.TxRawResult,
// TxHeight gives the block height of the transaction id specified
func (pgb *ChainDB) TxHeight(txid *chainhash.Hash) (height int64) {
// Alt. DB-powered height lookup:
- // txBlocks, _, err := pgb.TransactionBlocks(txid.String())
+ // txBlock, _, _, err := pgb.TransactionBlock(txid.String())
// if err != nil {
- // log.Errorf("TransactionBlocks failed for: %v", txid)
+ // log.Errorf("TransactionBlock failed for: %v", txid)
// return 0
// }
- // // ordered by valid, mainchain, height
- // for _, block := range txBlocks {
- // if block.IsMainchain {
- // return int64(txBlocks[0].Height)
- // }
- // }
- // return 0
- txraw, err := pgb.Client.GetRawTransactionVerbose(context.TODO(), txid)
+ // return txBlock.Height
+ txraw, err := pgb.Client.GetRawTransactionVerbose(pgb.ctx, txid)
if err != nil {
log.Errorf("GetRawTransactionVerbose failed for: %v", txid)
return 0
@@ -6402,7 +6539,7 @@ func (pgb *ChainDB) GetExplorerFullBlocks(start int, end int) []*exptypes.BlockI
}
summaries := make([]*exptypes.BlockInfo, 0, start-end)
for i := start; i > end; i-- {
- data := pgb.GetBlockVerbose(i, true)
+ data := pgb.getBlockVerbose(i, true)
block := new(exptypes.BlockInfo)
if data != nil {
block = pgb.GetExplorerBlock(data.Hash)
@@ -6414,7 +6551,10 @@ func (pgb *ChainDB) GetExplorerFullBlocks(start int, end int) []*exptypes.BlockI
// CurrentDifficulty returns the current difficulty from dcrd.
func (pgb *ChainDB) CurrentDifficulty() (float64, error) {
- diff, err := pgb.Client.GetDifficulty(context.TODO())
+ // pgb.lastExplorerBlock.Lock()
+ // defer pgb.lastExplorerBlock.Unlock()
+ // return pgb.lastExplorerBlock.blockInfo.Difficulty, nil // last block; same as next? probably not
+ diff, err := pgb.Client.GetDifficulty(pgb.ctx)
if err != nil {
log.Error("GetDifficulty failed")
return diff, err
@@ -6432,7 +6572,7 @@ func (pgb *ChainDB) Difficulty(timestamp int64) float64 {
return diff
}
- diff, err := RetrieveDiff(pgb.ctx, pgb.db, timestamp)
+ diff, err := retrieveDiff(pgb.ctx, pgb.db, timestamp)
if err != nil {
log.Errorf("Unable to retrieve difficulty: %v", err)
return -1
@@ -6447,6 +6587,7 @@ func (pgb *ChainDB) Difficulty(timestamp int64) float64 {
// total out for all the txs and vote info for the votes. The returned slice
// will be nil if the GetRawMempoolVerbose RPC fails. A zero-length non-nil
// slice is returned if there are no transactions in mempool. UNUSED?
+/*
func (pgb *ChainDB) GetMempool() []exptypes.MempoolTx {
mempooltxs, err := pgb.Client.GetRawMempoolVerbose(pgb.ctx, chainjson.GRMAll)
if err != nil {
@@ -6515,11 +6656,12 @@ func (pgb *ChainDB) GetMempool() []exptypes.MempoolTx {
return txs
}
+*/
// BlockchainInfo retrieves the result of the getblockchaininfo node RPC.
-func (pgb *ChainDB) BlockchainInfo() (*chainjson.GetBlockChainInfoResult, error) {
- return pgb.Client.GetBlockChainInfo(pgb.ctx)
-}
+// func (pgb *ChainDB) BlockchainInfo() (*chainjson.GetBlockChainInfoResult, error) {
+// return pgb.Client.GetBlockChainInfo(pgb.ctx)
+// }
// UpdateChan creates a channel that will receive height updates. All calls to
// UpdateChan should be completed before blocks start being connected.
@@ -6542,69 +6684,3 @@ func (pgb *ChainDB) SignalHeight(height uint32) {
}
}
}
-
-func (pgb *ChainDB) MixedUtxosByHeight() (heights, utxoCountReg, utxoValueReg, utxoCountStk, utxoValueStk []int64, err error) {
- var rows *sql.Rows
- rows, err = pgb.db.Query(internal.SelectMixedVouts, -1)
- if err != nil {
- return
- }
- defer rows.Close()
-
- var vals, fundHeights, spendHeights []int64
- var trees []uint8
-
- var maxHeight int64
- minHeight := int64(math.MaxInt64)
- for rows.Next() {
- var value, fundHeight, spendHeight int64
- var spendHeightNull sql.NullInt64
- var tree uint8
- err = rows.Scan(&value, &fundHeight, &spendHeightNull, &tree)
- if err != nil {
- return
- }
- vals = append(vals, value)
- fundHeights = append(fundHeights, fundHeight)
- trees = append(trees, tree)
- if spendHeightNull.Valid {
- spendHeight = spendHeightNull.Int64
- } else {
- spendHeight = -1
- }
- spendHeights = append(spendHeights, spendHeight)
- if fundHeight < minHeight {
- minHeight = fundHeight
- }
- if spendHeight > maxHeight {
- maxHeight = spendHeight
- }
- }
-
- N := maxHeight - minHeight + 1
- heights = make([]int64, N)
- utxoCountReg = make([]int64, N)
- utxoValueReg = make([]int64, N)
- utxoCountStk = make([]int64, N)
- utxoValueStk = make([]int64, N)
-
- for h := minHeight; h <= maxHeight; h++ {
- i := h - minHeight
- heights[i] = h
- for iu := range vals {
- if h >= fundHeights[iu] && (h < spendHeights[iu] || spendHeights[iu] == -1) {
- if trees[iu] == 0 {
- utxoCountReg[i]++
- utxoValueReg[i] += vals[iu]
- } else {
- utxoCountStk[i]++
- utxoValueStk[i] += vals[iu]
- }
- }
- }
- }
-
- err = rows.Err()
- return
-
-}
diff --git a/db/dcrpg/pgblockchain_fullpgdb_test.go b/db/dcrpg/pgblockchain_fullpgdb_test.go
index 954aecad1..4c4b9f488 100644
--- a/db/dcrpg/pgblockchain_fullpgdb_test.go
+++ b/db/dcrpg/pgblockchain_fullpgdb_test.go
@@ -3,10 +3,8 @@
package dcrpg
import (
- "encoding/csv"
"encoding/json"
"fmt"
- "os"
"testing"
"time"
@@ -32,36 +30,6 @@ func TestGetAddressTransactionsRawWithSkip(t *testing.T) {
t.Log(d)
}
-func TestMixedUtxosByHeight(t *testing.T) {
- heights, utxoCountReg, utxoValueReg, utxoCountStk, utxoValueStk, err := db.MixedUtxosByHeight()
- if err != nil {
- t.Fatalf("failed: %v", err)
- }
-
- csvfile, err := os.Create("utxos.csv")
- if err != nil {
- t.Fatalf("error creating utxos file: %s", err)
- }
- defer csvfile.Close()
-
- csvwriter := csv.NewWriter(csvfile)
- defer csvwriter.Flush()
-
- for i := range heights {
- err = csvwriter.Write([]string{
- fmt.Sprint(heights[i]),
- fmt.Sprint(utxoCountReg[i]),
- fmt.Sprint(utxoValueReg[i] / 1e8),
- fmt.Sprint(utxoCountStk[i]),
- fmt.Sprint(utxoValueStk[i] / 1e8),
- })
- if err != nil {
- t.Fatalf("csvwriter.Write: %s", err)
- }
- }
-
-}
-
func TestAddressRows(t *testing.T) {
rows, err := db.AddressRowsMerged("Dsh6khiGjTuyExADXxjtDgz1gRr9C5dEUf6")
if err != nil {
diff --git a/db/dcrpg/pgblockchain_test.go b/db/dcrpg/pgblockchain_test.go
index 115df8b1b..09998c9e8 100644
--- a/db/dcrpg/pgblockchain_test.go
+++ b/db/dcrpg/pgblockchain_test.go
@@ -53,7 +53,7 @@ func TestInsertSwap(t *testing.T) {
Contract: []byte{1, 2, 3, 4, 5, 6, 7, 8}, // not stored
IsRefund: true,
}
- err = InsertSwap(db.db, 1234, asd)
+ err = insertSwap(db.db, 1234, asd)
if err != nil {
t.Fatal(err)
}
@@ -61,7 +61,7 @@ func TestInsertSwap(t *testing.T) {
asd.SpendTx = &chainhash.Hash{5, 6}
asd.SpendVin = 2
asd.Secret = nil
- err = InsertSwap(db.db, 1234, asd)
+ err = insertSwap(db.db, 1234, asd)
if err != nil {
t.Fatal(err)
}
@@ -141,7 +141,7 @@ func TestMergeRows(t *testing.T) {
}
func TestRetrieveUTXOs(t *testing.T) {
- utxos, err := RetrieveUTXOs(context.Background(), db.db)
+ utxos, err := retrieveUTXOs(context.Background(), db.db)
if err != nil {
t.Fatal(err)
}
@@ -159,7 +159,7 @@ func TestRetrieveUTXOs(t *testing.T) {
}
func TestUtxoStore_Reinit(t *testing.T) {
- utxos, err := RetrieveUTXOs(context.Background(), db.db)
+ utxos, err := retrieveUTXOs(context.Background(), db.db)
if err != nil {
t.Fatal(err)
}
@@ -195,7 +195,7 @@ func TestCheckDefaultTimeZone(t *testing.T) {
func TestDeleteBestBlock(t *testing.T) {
ctx := context.Background()
- res, height, hash, err := DeleteBestBlock(ctx, db.db)
+ res, height, hash, err := deleteBestBlock(ctx, db.db)
t.Logf("Deletion summary for block %d (%s): %v", height, hash, res)
if err != nil {
t.Errorf("Failed to delete best block data: %v", err)
@@ -217,7 +217,7 @@ func TestDeleteBestBlock(t *testing.T) {
}
func TestDeleteBlocks(t *testing.T) {
- height0, hash0, _, err := RetrieveBestBlockHeight(context.Background(), db.db)
+ height0, hash0, err := retrieveBestBlockHeight(context.Background(), db.db)
if err != nil {
t.Error(err)
}
@@ -233,7 +233,7 @@ func TestDeleteBlocks(t *testing.T) {
start := time.Now()
ctx := context.Background()
- res, _, _, err := DeleteBlocks(ctx, N, db.db)
+ res, _, _, err := deleteBlocks(ctx, N, db.db)
if err != nil {
t.Error(err)
}
@@ -245,7 +245,7 @@ func TestDeleteBlocks(t *testing.T) {
t.Log("*** Blocks deleted from DB! Resync or download new test data! ***")
t.Log("*****************************************************************")
- height, hash, _, err := RetrieveBestBlockHeight(ctx, db.db)
+ height, hash, err := retrieveBestBlockHeight(ctx, db.db)
if err != nil {
t.Error(err)
}
@@ -260,9 +260,8 @@ func TestDeleteBlocks(t *testing.T) {
func TestRetrieveTxsByBlockHash(t *testing.T) {
//block80740 := "00000000000003ae4fa13a6dcd53bf2fddacfac12e86e5b5f98a08a71d3e6caa"
- block0 := "298e5cc3d985bfe7f81dc135f360abe089edd4396b86d2de66b0cef42b21d980" // genesis
- _, _, _, _, blockTimes, _ := RetrieveTxsByBlockHash(
- context.Background(), db.db, block0)
+ block0, _ := chainHashFromStr("298e5cc3d985bfe7f81dc135f360abe089edd4396b86d2de66b0cef42b21d980") // genesis
+ _, _, _, blockTimes, _ := retrieveTxsByBlockHash(context.Background(), db.db, block0)
// Check TimeDef.String
blockTimeStr := blockTimes[0].String()
t.Log(blockTimeStr)
@@ -336,12 +335,14 @@ func TestUpdateChainState(t *testing.T) {
t.Fatalf("expected no error to be returned but found: %v", err)
}
+ bbh, _ := chainHashFromStr("00000000000000001d8cfa54dc13cfb0563421fd017801401cb2bdebe3579355")
+
// Expected payload
var expectedPayload = dbtypes.BlockChainData{
Chain: "mainnet",
SyncHeight: 316016,
BestHeight: 316016,
- BestBlockHash: "00000000000000001d8cfa54dc13cfb0563421fd017801401cb2bdebe3579355",
+ BestBlockHash: bbh,
Difficulty: 406452686,
VerificationProgress: 1.0,
ChainWork: "0000000000000000000000000000000000000000000209c779c196914f038522",
diff --git a/db/dcrpg/queries.go b/db/dcrpg/queries.go
index d06cf1f22..5d2f2b231 100644
--- a/db/dcrpg/queries.go
+++ b/db/dcrpg/queries.go
@@ -1,14 +1,13 @@
-// Copyright (c) 2018-2021, The Decred developers
+// Copyright (c) 2018-2023, The Decred developers
// Copyright (c) 2017, The dcrdata developers
// See LICENSE for details.
package dcrpg
import (
- "bytes"
"context"
"database/sql"
- "encoding/hex"
+ "database/sql/driver"
"errors"
"fmt"
"math/big"
@@ -16,10 +15,7 @@ import (
"time"
"github.com/decred/dcrd/blockchain/stake/v5"
- "github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v3"
- "github.com/decred/dcrd/dcrutil/v4"
- "github.com/decred/dcrd/txscript/v4/stdscript"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrdata/db/dcrpg/v8/internal"
@@ -31,10 +27,10 @@ import (
"github.com/lib/pq"
)
-// DBBestBlock retrieves the best block hash and height from the meta table. The
+// dbBestBlock retrieves the best block hash and height from the meta table. The
// error value will never be sql.ErrNoRows; instead with height == -1 indicating
// no data in the meta table.
-func DBBestBlock(ctx context.Context, db *sql.DB) (hash string, height int64, err error) {
+func dbBestBlock(ctx context.Context, db *sql.DB) (hash dbtypes.ChainHash, height int64, err error) {
err = db.QueryRowContext(ctx, internal.SelectMetaDBBestBlock).Scan(&height, &hash)
if err == sql.ErrNoRows {
err = nil
@@ -43,8 +39,8 @@ func DBBestBlock(ctx context.Context, db *sql.DB) (hash string, height int64, er
return
}
-// SetDBBestBlock sets the best block hash and height in the meta table.
-func SetDBBestBlock(db *sql.DB, hash string, height int64) error {
+// setDBBestBlock sets the best block hash and height in the meta table.
+func setDBBestBlock(db *sql.DB, hash dbtypes.ChainHash, height int64) error {
numRows, err := sqlExec(db, internal.SetMetaDBBestBlock,
"failed to update best block in meta table: ", height, hash)
if err != nil {
@@ -57,16 +53,16 @@ func SetDBBestBlock(db *sql.DB, hash string, height int64) error {
return nil
}
-// IBDComplete indicates whether initial block download was completed according
+// ibdComplete indicates whether initial block download was completed according
// to the meta.ibd_complete flag.
-func IBDComplete(db *sql.DB) (ibdComplete bool, err error) {
+func ibdComplete(db *sql.DB) (ibdComplete bool, err error) {
err = db.QueryRow(internal.SelectMetaDBIbdComplete).Scan(&ibdComplete)
return
}
-// SetIBDComplete set the ibd_complete (Initial Block Download complete) flag in
+// setIBDComplete set the ibd_complete (Initial Block Download complete) flag in
// the meta table.
-func SetIBDComplete(db SqlExecutor, ibdComplete bool) error {
+func setIBDComplete(db SqlExecutor, ibdComplete bool) error {
numRows, err := sqlExec(db, internal.SetMetaDBIbdComplete,
"failed to update ibd_complete in meta table: ", ibdComplete)
if err != nil {
@@ -79,16 +75,7 @@ func SetIBDComplete(db SqlExecutor, ibdComplete bool) error {
return nil
}
-// outputCountType defines the modes of the output count chart data.
-// outputCountByAllBlocks defines count per block i.e. solo and pooled tickets
-// count per block. outputCountByTicketPoolWindow defines the output count per
-// given ticket price window
-type outputCountType int
-
const (
- outputCountByAllBlocks outputCountType = iota
- outputCountByTicketPoolWindow
-
notOneRowErrMsg = "failed to update exactly 1 row"
)
@@ -180,9 +167,9 @@ func IsUniqueIndex(db *sql.DB, indexName string) (isUnique bool, err error) {
return
}
-// DeleteDuplicateVins deletes rows in vin with duplicate tx information,
+// deleteDuplicateVins deletes rows in vin with duplicate tx information,
// leaving the one row with the lowest id.
-func DeleteDuplicateVins(db *sql.DB) (int64, error) {
+func deleteDuplicateVins(db *sql.DB) (int64, error) {
execErrPrefix := "failed to delete duplicate vins: "
existsIdx, err := ExistsIndex(db, "uix_vin")
@@ -204,9 +191,9 @@ func DeleteDuplicateVins(db *sql.DB) (int64, error) {
return sqlExec(db, internal.DeleteVinsDuplicateRows, execErrPrefix)
}
-// DeleteDuplicateVouts deletes rows in vouts with duplicate tx information,
+// deleteDuplicateVouts deletes rows in vouts with duplicate tx information,
// leaving the one row with the lowest id.
-func DeleteDuplicateVouts(db *sql.DB) (int64, error) {
+func deleteDuplicateVouts(db *sql.DB) (int64, error) {
execErrPrefix := "failed to delete duplicate vouts: "
existsIdx, err := ExistsIndex(db, "uix_vout_txhash_ind")
@@ -225,9 +212,9 @@ func DeleteDuplicateVouts(db *sql.DB) (int64, error) {
return sqlExec(db, internal.DeleteVoutDuplicateRows, execErrPrefix)
}
-// DeleteDuplicateTxns deletes rows in transactions with duplicate tx-block
+// deleteDuplicateTxns deletes rows in transactions with duplicate tx-block
// hashes, leaving the one row with the lowest id.
-func DeleteDuplicateTxns(db *sql.DB) (int64, error) {
+func deleteDuplicateTxns(db *sql.DB) (int64, error) {
execErrPrefix := "failed to delete duplicate transactions: "
existsIdx, err := ExistsIndex(db, "uix_tx_hashes")
@@ -246,9 +233,9 @@ func DeleteDuplicateTxns(db *sql.DB) (int64, error) {
return sqlExec(db, internal.DeleteTxDuplicateRows, execErrPrefix)
}
-// DeleteDuplicateAgendas deletes rows in agendas with duplicate names leaving
+// deleteDuplicateAgendas deletes rows in agendas with duplicate names leaving
// the one row with the lowest id.
-func DeleteDuplicateAgendas(db *sql.DB) (int64, error) {
+func deleteDuplicateAgendas(db *sql.DB) (int64, error) {
if isuniq, err := IsUniqueIndex(db, "uix_agendas_name"); err != nil && err != sql.ErrNoRows {
return 0, err
} else if isuniq {
@@ -258,9 +245,9 @@ func DeleteDuplicateAgendas(db *sql.DB) (int64, error) {
return sqlExec(db, internal.DeleteAgendasDuplicateRows, execErrPrefix)
}
-// DeleteDuplicateAgendaVotes deletes rows in agenda_votes with duplicate
+// deleteDuplicateAgendaVotes deletes rows in agenda_votes with duplicate
// votes-row-id and agendas-row-id leaving the one row with the lowest id.
-func DeleteDuplicateAgendaVotes(db *sql.DB) (int64, error) {
+func deleteDuplicateAgendaVotes(db *sql.DB) (int64, error) {
if isuniq, err := IsUniqueIndex(db, "uix_agenda_votes"); err != nil && err != sql.ErrNoRows {
return 0, err
} else if isuniq {
@@ -272,7 +259,7 @@ func DeleteDuplicateAgendaVotes(db *sql.DB) (int64, error) {
// --- stake (votes, tickets, misses, treasury) tables ---
-func InsertTreasuryTxns(db *sql.DB, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) error {
+func insertTreasuryTxns(db *sql.DB, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) error {
dbtx, err := db.Begin()
if err != nil {
return fmt.Errorf("unable to begin database transaction: %w", err)
@@ -321,11 +308,11 @@ func InsertTreasuryTxns(db *sql.DB, dbTxns []*dbtypes.Tx, checked, updateExistin
return dbtx.Commit()
}
-// InsertTickets takes a slice of *dbtypes.Tx and corresponding DB row IDs for
+// insertTickets takes a slice of *dbtypes.Tx and corresponding DB row IDs for
// transactions, extracts the tickets, and inserts the tickets into the
// database. Outputs are a slice of DB row IDs of the inserted tickets, and an
// error.
-func InsertTickets(db *sql.DB, dbTxns []*dbtypes.Tx, txDbIDs []uint64, checked, updateExistingRecords bool) ([]uint64, []*dbtypes.Tx, error) {
+func insertTickets(db *sql.DB, dbTxns []*dbtypes.Tx, txDbIDs []uint64, checked, updateExistingRecords bool) ([]uint64, []*dbtypes.Tx, error) {
dbtx, err := db.Begin()
if err != nil {
return nil, nil, fmt.Errorf("unable to begin database transaction: %w", err)
@@ -360,14 +347,15 @@ func InsertTickets(db *sql.DB, dbTxns []*dbtypes.Tx, txDbIDs []uint64, checked,
if len(tx.Vouts[0].ScriptPubKeyData.Addresses) > 0 {
stakesubmissionAddress = tx.Vouts[0].ScriptPubKeyData.Addresses[0]
}
- isScriptHash = stdscript.IsStakeSubmissionScriptHashScript(tx.Vouts[0].Version, tx.Vouts[0].ScriptPubKey)
+ isScriptHash = tx.Vouts[0].ScriptPubKeyData.Type == dbtypes.SCScriptHash
+ // isScriptHash = stdscript.IsStakeSubmissionScriptHashScript(tx.Vouts[0].Version, tx.Vouts[0].ScriptPubKey)
// NOTE: This was historically broken, always setting false, and
// calling it "isMultisig"! A DB upgrade is needed to identify old
// p2sh tickets, or just remove the is_multisig column entirely.
}
- price := dcrutil.Amount(tx.Vouts[0].Value).ToCoin()
- fee := dcrutil.Amount(tx.Fees).ToCoin()
+ price := toCoin(tx.Vouts[0].Value)
+ fee := toCoin(tx.Fees)
isSplit := tx.NumVin > 1
var id uint64
@@ -395,7 +383,7 @@ func InsertTickets(db *sql.DB, dbTxns []*dbtypes.Tx, txDbIDs []uint64, checked,
return ids, ticketTx, dbtx.Commit()
}
-// InsertVotes takes a slice of *dbtypes.Tx, which must contain all the stake
+// insertVotes takes a slice of *dbtypes.Tx, which must contain all the stake
// transactions in a block, extracts the votes, and inserts the votes into the
// database. The input MsgBlockPG contains each stake transaction's MsgTx in
// STransactions, and they must be in the same order as the dbtypes.Tx slice.
@@ -418,10 +406,10 @@ func InsertTickets(db *sql.DB, dbTxns []*dbtypes.Tx, txDbIDs []uint64, checked,
// information and references to the agendas and votes tables.
//
// Outputs are slices of DB row IDs for the votes and misses, and an error.
-func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *TicketTxnIDGetter,
+func insertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *TicketTxnIDGetter,
msgBlock *MsgBlockPG, checked, updateExistingRecords bool, params *chaincfg.Params,
- votesMilestones *dbtypes.BlockChainData) ([]uint64, []*dbtypes.Tx, []string,
- []uint64, map[string]uint64, error) {
+ votesMilestones *dbtypes.BlockChainData) ([]uint64, []*dbtypes.Tx, []dbtypes.ChainHash,
+ []uint64, map[dbtypes.ChainHash]uint64, error) {
// Choose only SSGen txns
msgTxs := msgBlock.STransactions
var voteTxs []*dbtypes.Tx
@@ -533,11 +521,11 @@ func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *
// Insert each vote, and build list of missed votes equal to
// setdiff(Validators, votes).
- candidateBlockHash := msgBlock.Header.PrevBlock.String()
+ candidateBlockHash := dbtypes.ChainHash(msgBlock.Header.PrevBlock)
ids := make([]uint64, 0, len(voteTxs))
- spentTicketHashes := make([]string, 0, len(voteTxs))
+ spentTicketHashes := make([]dbtypes.ChainHash, 0, len(voteTxs))
spentTicketDbIDs := make([]uint64, 0, len(voteTxs))
- misses := make([]string, len(msgBlock.Validators))
+ misses := make([]dbtypes.ChainHash, len(msgBlock.Validators))
copy(misses, msgBlock.Validators)
for i, tx := range voteTxs {
msgTx := voteMsgTxs[i]
@@ -548,9 +536,9 @@ func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *
return nil, nil, nil, nil, nil, err
}
- voteReward := dcrutil.Amount(msgTx.TxIn[0].ValueIn).ToCoin()
- stakeSubmissionAmount := dcrutil.Amount(msgTx.TxIn[1].ValueIn).ToCoin()
- stakeSubmissionTxHash := msgTx.TxIn[1].PreviousOutPoint.Hash.String()
+ voteReward := toCoin(msgTx.TxIn[0].ValueIn)
+ stakeSubmissionAmount := toCoin(msgTx.TxIn[1].ValueIn)
+ stakeSubmissionTxHash := dbtypes.ChainHash(msgTx.TxIn[1].PreviousOutPoint.Hash)
spentTicketHashes = append(spentTicketHashes, stakeSubmissionTxHash)
// Lookup the row ID in the transactions table for the ticket purchase.
@@ -657,7 +645,7 @@ func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *
}
// Store missed tickets.
- missHashMap := make(map[string]uint64)
+ missHashMap := make(map[dbtypes.ChainHash]uint64)
if len(misses) > 0 {
// Insert misses, optionally updating a row if it conflicts with the
// unique index on (ticket_hash, block_hash).
@@ -670,7 +658,7 @@ func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *
// Insert the miss in the misses table, and store the row ID of the
// new/existing/updated miss.
- blockHash := msgBlock.BlockHash().String()
+ blockHash := dbtypes.ChainHash(msgBlock.BlockHash())
for i := range misses {
var id uint64
err = stmtMissed.QueryRow(
@@ -694,9 +682,9 @@ func InsertVotes(db *sql.DB, dbTxns []*dbtypes.Tx, _ /*txDbIDs*/ []uint64, fTx *
return ids, voteTxs, spentTicketHashes, spentTicketDbIDs, missHashMap, dbtx.Commit()
}
-// RetrieveMissedVotesInBlock gets a list of ticket hashes that were called to
+// retrieveMissedVotesInBlock gets a list of ticket hashes that were called to
// vote in the given block, but missed their vote.
-func RetrieveMissedVotesInBlock(ctx context.Context, db *sql.DB, blockHash string) (ticketHashes []string, err error) {
+func retrieveMissedVotesInBlock(ctx context.Context, db *sql.DB, blockHash dbtypes.ChainHash) (ticketHashes []dbtypes.ChainHash, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectMissesInBlock, blockHash)
if err != nil {
@@ -706,7 +694,7 @@ func RetrieveMissedVotesInBlock(ctx context.Context, db *sql.DB, blockHash strin
defer closeRows(rows)
for rows.Next() {
- var hash string
+ var hash dbtypes.ChainHash
err = rows.Scan(&hash)
if err != nil {
return
@@ -729,10 +717,11 @@ func retrieveMissedVotesForBlockRange(ctx context.Context, db *sql.DB, startHeig
return
}
-// RetrieveMissesForTicket gets all of the blocks in which the ticket was called
+// retrieveMissesForTicket gets all of the blocks in which the ticket was called
// to place a vote on the previous block. The previous block that would have
// been validated by the vote is not the block data that is returned.
-func RetrieveMissesForTicket(ctx context.Context, db *sql.DB, ticketHash string) (blockHashes []string, blockHeights []int64, err error) {
+/*
+func retrieveMissesForTicket(ctx context.Context, db *sql.DB, ticketHash string) (blockHashes []string, blockHeights []int64, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectMissesForTicket, ticketHash)
if err != nil {
@@ -756,11 +745,12 @@ func RetrieveMissesForTicket(ctx context.Context, db *sql.DB, ticketHash string)
return
}
+*/
-// RetrieveMissForTicket gets the mainchain block in which the ticket was called
+// retrieveMissForTicket gets the mainchain block in which the ticket was called
// to place a vote on the previous block. The previous block that would have
// been validated by the vote is not the block data that is returned.
-func RetrieveMissForTicket(ctx context.Context, db *sql.DB, ticketHash string) (blockHash string, blockHeight int64, err error) {
+func retrieveMissForTicket(ctx context.Context, db *sql.DB, ticketHash dbtypes.ChainHash) (blockHash dbtypes.ChainHash, blockHeight int64, err error) {
err = db.QueryRowContext(ctx, internal.SelectMissesMainchainForTicket,
ticketHash).Scan(&blockHeight, &blockHash)
return
@@ -792,13 +782,14 @@ func retrieveAllAgendas(db *sql.DB) (map[string]dbtypes.MileStone, error) {
return currentMilestones, err
}
-// RetrieveAllRevokes gets for all ticket revocations the row IDs (primary
+// retrieveAllRevokes gets for all ticket revocations the row IDs (primary
// keys), transaction hashes, block heights. It also gets the row ID in the vins
// table for the first input of the revocation transaction, which should
// correspond to the stakesubmission previous outpoint of the ticket purchase.
// This function is used in UpdateSpendingInfoInAllTickets, so it should not be
// subject to timeouts.
-func RetrieveAllRevokes(ctx context.Context, db *sql.DB) (ids []uint64, hashes []string, heights []int64, vinDbIDs []uint64, err error) {
+/*
+func retrieveAllRevokes(ctx context.Context, db *sql.DB) (ids []uint64, hashes []string, heights []int64, vinDbIDs []uint64, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectAllRevokes)
if err != nil {
@@ -825,12 +816,14 @@ func RetrieveAllRevokes(ctx context.Context, db *sql.DB) (ids []uint64, hashes [
return
}
+*/
-// RetrieveAllVotesDbIDsHeightsTicketDbIDs gets for all votes the row IDs
+// retrieveAllVotesDbIDsHeightsTicketDbIDs gets for all votes the row IDs
// (primary keys) in the votes table, the block heights, and the row IDs in the
// tickets table of the spent tickets. This function is used in
// UpdateSpendingInfoInAllTickets, so it should not be subject to timeouts.
-func RetrieveAllVotesDbIDsHeightsTicketDbIDs(ctx context.Context, db *sql.DB) (ids []uint64, heights []int64,
+/*
+func retrieveAllVotesDbIDsHeightsTicketDbIDs(ctx context.Context, db *sql.DB) (ids []uint64, heights []int64,
ticketDbIDs []uint64, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectAllVoteDbIDsHeightsTicketDbIDs)
@@ -855,6 +848,7 @@ func RetrieveAllVotesDbIDsHeightsTicketDbIDs(ctx context.Context, db *sql.DB) (i
return
}
+*/
// retrieveWindowBlocks fetches chunks of windows using the limit and offset provided
// for a window size of chaincfg.Params.StakeDiffWindowSize.
@@ -955,8 +949,8 @@ func retrieveTimeBasedBlockListing(ctx context.Context, db *sql.DB, timeInterval
return data, nil
}
-// RetrieveUnspentTickets gets all unspent tickets.
-func RetrieveUnspentTickets(ctx context.Context, db *sql.DB) (ids []uint64, hashes []string, err error) {
+// retrieveUnspentTickets gets all unspent tickets.
+func retrieveUnspentTickets(ctx context.Context, db *sql.DB) (ids []uint64, hashes []dbtypes.ChainHash, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectUnspentTickets)
if err != nil {
@@ -966,7 +960,7 @@ func RetrieveUnspentTickets(ctx context.Context, db *sql.DB) (ids []uint64, hash
for rows.Next() {
var id uint64
- var hash string
+ var hash dbtypes.ChainHash
err = rows.Scan(&id, &hash)
if err != nil {
return nil, nil, err
@@ -982,29 +976,29 @@ func RetrieveUnspentTickets(ctx context.Context, db *sql.DB) (ids []uint64, hash
return ids, hashes, nil
}
-// RetrieveTicketIDByHashNoCancel gets the db row ID (primary key) in the
+// retrieveTicketIDByHashNoCancel gets the db row ID (primary key) in the
// tickets table for the given ticket hash. As the name implies, this query
// should not accept a cancelable context.
-func RetrieveTicketIDByHashNoCancel(db *sql.DB, ticketHash string) (id uint64, err error) {
+func retrieveTicketIDByHashNoCancel(db *sql.DB, ticketHash dbtypes.ChainHash) (id uint64, err error) {
err = db.QueryRow(internal.SelectTicketIDByHash, ticketHash).Scan(&id)
return
}
-// RetrieveTicketStatusByHash gets the spend status and ticket pool status for
+// retrieveTicketStatusByHash gets the spend status and ticket pool status for
// the given ticket hash.
-func RetrieveTicketStatusByHash(ctx context.Context, db *sql.DB, ticketHash string) (id uint64,
+func retrieveTicketStatusByHash(ctx context.Context, db *sql.DB, ticketHash dbtypes.ChainHash) (id uint64,
spendStatus dbtypes.TicketSpendType, poolStatus dbtypes.TicketPoolStatus, err error) {
err = db.QueryRowContext(ctx, internal.SelectTicketStatusByHash, ticketHash).
Scan(&id, &spendStatus, &poolStatus)
return
}
-// RetrieveTicketInfoByHash retrieves the ticket spend and pool statuses as well
+// retrieveTicketInfoByHash retrieves the ticket spend and pool statuses as well
// as the purchase and spending block info and spending txid.
-func RetrieveTicketInfoByHash(ctx context.Context, db *sql.DB, ticketHash string) (spendStatus dbtypes.TicketSpendType,
- poolStatus dbtypes.TicketPoolStatus, purchaseBlock, lotteryBlock *apitypes.TinyBlock, spendTxid string, err error) {
+func retrieveTicketInfoByHash(ctx context.Context, db *sql.DB, ticketHash dbtypes.ChainHash) (spendStatus dbtypes.TicketSpendType,
+ poolStatus dbtypes.TicketPoolStatus, purchaseBlock, lotteryBlock *apitypes.TinyBlock, spendTxid dbtypes.ChainHash, err error) {
var dbid sql.NullInt64
- var purchaseHash, spendHash string
+ var purchaseHash, spendHash dbtypes.ChainHash
var purchaseHeight, spendHeight uint32
err = db.QueryRowContext(ctx, internal.SelectTicketInfoByHash, ticketHash).
Scan(&purchaseHash, &purchaseHeight, &spendStatus, &poolStatus, &dbid)
@@ -1013,7 +1007,7 @@ func RetrieveTicketInfoByHash(ctx context.Context, db *sql.DB, ticketHash string
}
purchaseBlock = &apitypes.TinyBlock{
- Hash: purchaseHash,
+ Hash: purchaseHash.String(),
Height: purchaseHeight,
}
@@ -1035,7 +1029,7 @@ func RetrieveTicketInfoByHash(ctx context.Context, db *sql.DB, ticketHash string
if spendStatus == dbtypes.TicketVoted {
lotteryBlock = &apitypes.TinyBlock{
- Hash: spendHash,
+ Hash: spendHash.String(),
Height: spendHeight,
}
}
@@ -1043,9 +1037,10 @@ func RetrieveTicketInfoByHash(ctx context.Context, db *sql.DB, ticketHash string
return
}
-// RetrieveTicketIDsByHashes gets the db row IDs (primary keys) in the tickets
+// retrieveTicketIDsByHashes gets the db row IDs (primary keys) in the tickets
// table for the given ticket purchase transaction hashes.
-func RetrieveTicketIDsByHashes(ctx context.Context, db *sql.DB, ticketHashes []string) (ids []uint64, err error) {
+/*
+func retrieveTicketIDsByHashes(ctx context.Context, db *sql.DB, ticketHashes []string) (ids []uint64, err error) {
var dbtx *sql.Tx
dbtx, err = db.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.LevelDefault,
@@ -1081,6 +1076,7 @@ func RetrieveTicketIDsByHashes(ctx context.Context, db *sql.DB, ticketHashes []s
return ids, dbtx.Commit()
}
+*/
// retrieveTicketsByDate fetches the tickets in the current ticketpool order by the
// purchase date. The maturity block is needed to identify immature tickets.
@@ -1097,7 +1093,7 @@ func retrieveTicketsByDate(ctx context.Context, db *sql.DB, maturityBlock int64,
for rows.Next() {
var immature, live uint64
var timestamp time.Time
- var price, total float64
+ var price float64
err = rows.Scan(&timestamp, &price, &immature, &live)
if err != nil {
return nil, fmt.Errorf("retrieveTicketsByDate: %w", err)
@@ -1108,9 +1104,8 @@ func retrieveTicketsByDate(ctx context.Context, db *sql.DB, maturityBlock int64,
tickets.Live = append(tickets.Live, live)
// Returns the average value of a ticket depending on the grouping mode used
- price *= 100000000
- total = float64(live + immature)
- tickets.Price = append(tickets.Price, dcrutil.Amount(price/total).ToCoin())
+ avg := uint64(price*1e8) / (live + immature)
+ tickets.Price = append(tickets.Price, toCoin(avg))
}
if err = rows.Err(); err != nil {
return nil, err
@@ -1183,9 +1178,9 @@ func retrieveTicketsGroupedByType(ctx context.Context, db *sql.DB) (*dbtypes.Poo
return tickets, nil
}
-// SetPoolStatusForTickets sets the ticket pool status for the tickets specified
+// setPoolStatusForTickets sets the ticket pool status for the tickets specified
// by db row ID.
-func SetPoolStatusForTickets(db *sql.DB, ticketDbIDs []uint64, poolStatuses []dbtypes.TicketPoolStatus) (int64, error) {
+func setPoolStatusForTickets(db *sql.DB, ticketDbIDs []uint64, poolStatuses []dbtypes.TicketPoolStatus) (int64, error) {
if len(ticketDbIDs) == 0 {
return 0, nil
}
@@ -1225,6 +1220,7 @@ func SetPoolStatusForTickets(db *sql.DB, ticketDbIDs []uint64, poolStatuses []db
// SetPoolStatusForTicketsByHash sets the ticket pool status for the tickets
// specified by ticket purchase transaction hash.
+/*
func SetPoolStatusForTicketsByHash(db *sql.DB, tickets []string,
poolStatuses []dbtypes.TicketPoolStatus) (int64, error) {
if len(tickets) == 0 {
@@ -1264,11 +1260,12 @@ func SetPoolStatusForTicketsByHash(db *sql.DB, tickets []string,
return totalTicketsUpdated, dbtx.Commit()
}
+*/
-// SetSpendingForTickets sets the spend type, spend height, spending transaction
+// setSpendingForTickets sets the spend type, spend height, spending transaction
// row IDs (in the table relevant to the spend type), and ticket pool status for
// the given tickets specified by their db row IDs.
-func SetSpendingForTickets(db *sql.DB, ticketDbIDs, spendDbIDs []uint64,
+func setSpendingForTickets(db *sql.DB, ticketDbIDs, spendDbIDs []uint64,
blockHeights []int64, spendTypes []dbtypes.TicketSpendType,
poolStatuses []dbtypes.TicketPoolStatus) (int64, error) {
dbtx, err := db.Begin()
@@ -1309,6 +1306,7 @@ func SetSpendingForTickets(db *sql.DB, ticketDbIDs, spendDbIDs []uint64,
// InsertAddressRow inserts an AddressRow (input or output), returning the row
// ID in the addresses table of the inserted data.
+/*
func InsertAddressRow(db *sql.DB, dbA *dbtypes.AddressRow, dupCheck, updateExistingRecords bool) (uint64, error) {
sqlStmt := internal.MakeAddressRowInsertStatement(dupCheck, updateExistingRecords)
var id uint64
@@ -1317,11 +1315,12 @@ func InsertAddressRow(db *sql.DB, dbA *dbtypes.AddressRow, dupCheck, updateExist
dbA.IsFunding, dbA.ValidMainChain, dbA.TxType).Scan(&id)
return id, err
}
+*/
-// InsertAddressRowsDbTx is like InsertAddressRows, except that it takes a
+// insertAddressRowsDbTx is like InsertAddressRows, except that it takes a
// sql.Tx. The caller is required to Commit or Rollback the transaction
// depending on the returned error value.
-func InsertAddressRowsDbTx(dbTx *sql.Tx, dbAs []*dbtypes.AddressRow, dupCheck, updateExistingRecords bool) ([]uint64, error) {
+func insertAddressRowsDbTx(dbTx *sql.Tx, dbAs []*dbtypes.AddressRow, dupCheck, updateExistingRecords bool) ([]uint64, error) {
// Prepare the addresses row insert statement.
stmt, err := dbTx.Prepare(internal.MakeAddressRowInsertStatement(dupCheck, updateExistingRecords))
if err != nil {
@@ -1352,6 +1351,7 @@ func InsertAddressRowsDbTx(dbTx *sql.Tx, dbAs []*dbtypes.AddressRow, dupCheck, u
return ids, nil
}
+/*
// InsertAddressRows inserts multiple transaction inputs or outputs for certain
// addresses ([]AddressRow). The row IDs of the inserted data are returned.
func InsertAddressRows(db *sql.DB, dbAs []*dbtypes.AddressRow, dupCheck, updateExistingRecords bool) ([]uint64, error) {
@@ -1370,30 +1370,31 @@ func InsertAddressRows(db *sql.DB, dbAs []*dbtypes.AddressRow, dupCheck, updateE
return ids, dbtx.Commit()
}
-func RetrieveAddressUnspent(ctx context.Context, db *sql.DB, address string) (count, totalAmount int64, err error) {
+func retrieveAddressUnspent(ctx context.Context, db *sql.DB, address string) (count, totalAmount int64, err error) {
err = db.QueryRowContext(ctx, internal.SelectAddressUnspentCountANDValue, address).
Scan(&count, &totalAmount)
return
}
-func RetrieveAddressSpent(ctx context.Context, db *sql.DB, address string) (count, totalAmount int64, err error) {
+func retrieveAddressSpent(ctx context.Context, db *sql.DB, address string) (count, totalAmount int64, err error) {
err = db.QueryRowContext(ctx, internal.SelectAddressSpentCountANDValue, address).
Scan(&count, &totalAmount)
return
}
+*/
// retrieveAddressTxsCount return the number of record groups, where grouping is
// done by a specified time interval, for an address.
-func retrieveAddressTxsCount(ctx context.Context, db *sql.DB, address, interval string) (count int64, err error) {
- err = db.QueryRowContext(ctx, internal.MakeSelectAddressTimeGroupingCount(interval), address).Scan(&count)
- return
-}
+// func retrieveAddressTxsCount(ctx context.Context, db *sql.DB, address, interval string) (count int64, err error) {
+// err = db.QueryRowContext(ctx, internal.MakeSelectAddressTimeGroupingCount(interval), address).Scan(&count)
+// return
+// }
-// RetrieveAddressBalance gets the numbers of spent and unspent outpoints
+// retrieveAddressBalance gets the numbers of spent and unspent outpoints
// for the given address, the total amounts spent and unspent, the number of
// distinct spending transactions, and the fraction spent to and received from
// stake-related transactions.
-func RetrieveAddressBalance(ctx context.Context, db *sql.DB, address string) (balance *dbtypes.AddressBalance, err error) {
+func retrieveAddressBalance(ctx context.Context, db *sql.DB, address string) (balance *dbtypes.AddressBalance, err error) {
// Never return nil *AddressBalance.
balance = &dbtypes.AddressBalance{Address: address}
@@ -1470,15 +1471,15 @@ func RetrieveAddressBalance(ctx context.Context, db *sql.DB, address string) (ba
return
}
-func CountMergedSpendingTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
+func countMergedSpendingTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
return countMerged(ctx, db, address, internal.SelectAddressesMergedSpentCount)
}
-func CountMergedFundingTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
+func countMergedFundingTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
return countMerged(ctx, db, address, internal.SelectAddressesMergedFundingCount)
}
-func CountMergedTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
+func countMergedTxns(ctx context.Context, db *sql.DB, address string) (count int64, err error) {
return countMerged(ctx, db, address, internal.SelectAddressesMergedCount)
}
@@ -1514,10 +1515,11 @@ func countMerged(ctx context.Context, db *sql.DB, address, query string) (count
return
}
-// RetrieveAddressUTXOs gets the unspent transaction outputs (UTXOs) paying to
+// retrieveAddressUTXOs gets the unspent transaction outputs (UTXOs) paying to
// the specified address as a []*apitypes.AddressTxnOutput. The input current
// block height is used to compute confirmations of the located transactions.
-func RetrieveAddressUTXOs(ctx context.Context, db *sql.DB, address string, currentBlockHeight int64) ([]*apitypes.AddressTxnOutput, error) {
+/*
+func retrieveAddressUTXOs(ctx context.Context, db *sql.DB, address string, currentBlockHeight int64) ([]*apitypes.AddressTxnOutput, error) {
stmt, err := db.Prepare(internal.SelectAddressUnspentWithTxn)
if err != nil {
log.Error(err)
@@ -1543,8 +1545,8 @@ func RetrieveAddressUTXOs(ctx context.Context, db *sql.DB, address string, curre
return nil, err
}
txnOutput.BlockTime = blockTime.UNIX()
- txnOutput.ScriptPubKey = hex.EncodeToString(pkScript)
- txnOutput.Amount = dcrutil.Amount(atoms).ToCoin()
+ txnOutput.ScriptPubKey = pkScript
+ txnOutput.Amount = toCoin(atoms)
txnOutput.Satoshis = atoms
txnOutput.Height = blockHeight
txnOutput.Confirmations = currentBlockHeight - blockHeight + 1
@@ -1556,11 +1558,12 @@ func RetrieveAddressUTXOs(ctx context.Context, db *sql.DB, address string, curre
return outputs, nil
}
+*/
-// RetrieveAddressDbUTXOs gets the unspent transaction outputs (UTXOs) paying to
+// retrieveAddressDbUTXOs gets the unspent transaction outputs (UTXOs) paying to
// the specified address as a []*dbtypes.AddressTxnOutput. The input current
// block height is used to compute confirmations of the located transactions.
-func RetrieveAddressDbUTXOs(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressTxnOutput, error) {
+func retrieveAddressDbUTXOs(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressTxnOutput, error) {
stmt, err := db.Prepare(internal.SelectAddressUnspentWithTxn)
if err != nil {
log.Error(err)
@@ -1576,23 +1579,15 @@ func RetrieveAddressDbUTXOs(ctx context.Context, db *sql.DB, address string) ([]
var outputs []*dbtypes.AddressTxnOutput
for rows.Next() {
- pkScript := []byte{}
- var txHash string
var blockTime dbtypes.TimeDef
txnOutput := new(dbtypes.AddressTxnOutput)
- if err = rows.Scan(&txnOutput.Address, &txHash,
+ if err = rows.Scan(&txnOutput.Address, &txnOutput.TxHash,
&txnOutput.Atoms, &txnOutput.Height, &blockTime,
- &txnOutput.Vout, &pkScript); err != nil {
+ &txnOutput.Vout); err != nil {
log.Error(err)
return nil, err
}
txnOutput.BlockTime = blockTime.UNIX()
- err = chainhash.Decode(&txnOutput.TxHash, txHash)
- if err != nil {
- log.Error(err)
- return nil, err
- }
- txnOutput.PkScript = hex.EncodeToString(pkScript)
outputs = append(outputs, txnOutput)
}
if err = rows.Err(); err != nil {
@@ -1602,11 +1597,12 @@ func RetrieveAddressDbUTXOs(ctx context.Context, db *sql.DB, address string) ([]
return outputs, nil
}
-// RetrieveAddressTxnsOrdered will get all transactions for addresses provided
+// retrieveAddressTxnsOrdered will get all transactions for addresses provided
// and return them sorted by time in descending order. It will also return a
// short list of recently (defined as greater than recentBlockHeight) confirmed
// transactions that can be used to validate mempool status.
-func RetrieveAddressTxnsOrdered(ctx context.Context, db *sql.DB, addresses []string,
+/*
+func retrieveAddressTxnsOrdered(ctx context.Context, db *sql.DB, addresses []string,
recentBlockTime int64) (txs, recenttxs []chainhash.Hash, err error) {
var stmt *sql.Stmt
stmt, err = db.Prepare(internal.SelectAddressesAllTxn)
@@ -1643,10 +1639,12 @@ func RetrieveAddressTxnsOrdered(ctx context.Context, db *sql.DB, addresses []str
return
}
+*/
-// RetrieveAllAddressTxns retrieves all rows of the address table pertaining to
+// retrieveAllAddressTxns retrieves all rows of the address table pertaining to
// the given address.
-func RetrieveAllAddressTxns(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressRow, error) {
+/*
+func retrieveAllAddressTxns(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressRow, error) {
rows, err := db.QueryContext(ctx, internal.SelectAddressAllByAddress, address)
if err != nil {
return nil, err
@@ -1655,11 +1653,13 @@ func RetrieveAllAddressTxns(ctx context.Context, db *sql.DB, address string) ([]
return scanAddressQueryRows(rows, creditDebitQuery)
}
+*/
-// RetrieveAllMainchainAddressTxns retrieves all non-merged and valid_mainchain
+// retrieveAllMainchainAddressTxns retrieves all non-merged and valid_mainchain
// rows of the address table pertaining to the given address. For a limited
-// query, use RetrieveAddressTxns.
-func RetrieveAllMainchainAddressTxns(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressRow, error) {
+// query, use retrieveAddressTxns.
+/*
+func retrieveAllMainchainAddressTxns(ctx context.Context, db *sql.DB, address string) ([]*dbtypes.AddressRow, error) {
rows, err := db.QueryContext(ctx, internal.SelectAddressAllMainchainByAddress, address)
if err != nil {
return nil, err
@@ -1668,12 +1668,14 @@ func RetrieveAllMainchainAddressTxns(ctx context.Context, db *sql.DB, address st
return scanAddressQueryRows(rows, creditDebitQuery)
}
+*/
-// RetrieveAllAddressMergedTxns retrieves all merged rows of the address table
+// retrieveAllAddressMergedTxns retrieves all merged rows of the address table
// pertaining to the given address. Specify only valid_mainchain=true rows via
// the onlyValidMainchain argument. For a limited query, use
-// RetrieveAddressMergedTxns.
-func RetrieveAllAddressMergedTxns(ctx context.Context, db *sql.DB, address string, onlyValidMainchain bool) ([]uint64, []*dbtypes.AddressRow, error) {
+// retrieveAddressMergedTxns.
+/*
+func retrieveAllAddressMergedTxns(ctx context.Context, db *sql.DB, address string, onlyValidMainchain bool) ([]uint64, []*dbtypes.AddressRow, error) {
rows, err := db.QueryContext(ctx, internal.SelectAddressMergedViewAll, address)
if err != nil {
return nil, nil, err
@@ -1684,44 +1686,25 @@ func RetrieveAllAddressMergedTxns(ctx context.Context, db *sql.DB, address strin
onlyValidMainchain)
return nil, addr, err
}
+*/
// Regular (non-merged) address transactions queries.
-func RetrieveAddressTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
+func retrieveAddressTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
+ return retrieveAddressTxnsStmt(ctx, db, address, N, offset,
internal.SelectAddressLimitNByAddress, creditDebitQuery)
}
-func RetrieveAddressDebitTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
- internal.SelectAddressDebitsLimitNByAddress, creditQuery)
-}
-
-func RetrieveAddressCreditTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
- internal.SelectAddressCreditsLimitNByAddress, debitQuery)
-}
-
// Merged address transactions queries.
-func RetrieveAddressMergedDebitTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
- internal.SelectAddressMergedDebitView, mergedDebitQuery)
-}
-
-func RetrieveAddressMergedCreditTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
- internal.SelectAddressMergedCreditView, mergedCreditQuery)
-}
-
-func RetrieveAddressMergedTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
- return retrieveAddressTxns(ctx, db, address, N, offset,
+func retrieveAddressMergedTxns(ctx context.Context, db *sql.DB, address string, N, offset int64) ([]*dbtypes.AddressRow, error) {
+ return retrieveAddressTxnsStmt(ctx, db, address, N, offset,
internal.SelectAddressMergedView, mergedQuery)
}
// Address transaction query helpers.
-func retrieveAddressTxns(ctx context.Context, db *sql.DB, address string, N, offset int64,
+func retrieveAddressTxnsStmt(ctx context.Context, db *sql.DB, address string, N, offset int64,
statement string, queryType int) ([]*dbtypes.AddressRow, error) {
rows, err := db.QueryContext(ctx, statement, address, N, offset)
if err != nil {
@@ -1784,10 +1767,9 @@ func scanAddressQueryRows(rows *sql.Rows, queryType int) (addressRows []*dbtypes
for rows.Next() {
var id uint64
var addr dbtypes.AddressRow
- var matchingTxHash sql.NullString
var txVinIndex, vinDbID sql.NullInt64
- err = rows.Scan(&id, &addr.Address, &matchingTxHash, &addr.TxHash, &addr.TxType,
+ err = rows.Scan(&id, &addr.Address, &addr.MatchingTxHash, &addr.TxHash, &addr.TxType,
&addr.ValidMainChain, &txVinIndex, &addr.TxBlockTime, &vinDbID,
&addr.Value, &addr.IsFunding)
@@ -1810,9 +1792,6 @@ func scanAddressQueryRows(rows *sql.Rows, queryType int) (addressRows []*dbtypes
log.Warnf("Unrecognized addresses query type: %d", queryType)
}
- if matchingTxHash.Valid {
- addr.MatchingTxHash = matchingTxHash.String
- }
if txVinIndex.Valid {
addr.TxVinVoutIndex = uint32(txVinIndex.Int64)
}
@@ -1827,9 +1806,9 @@ func scanAddressQueryRows(rows *sql.Rows, queryType int) (addressRows []*dbtypes
return
}
-// RetrieveAddressIDsByOutpoint gets all address row IDs, addresses, and values
+// retrieveAddressIDsByOutpoint gets all address row IDs, addresses, and values
// for a given outpoint.
-func RetrieveAddressIDsByOutpoint(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) ([]uint64, []string, int64, error) {
+func retrieveAddressIDsByOutpoint(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash, voutIndex uint32) ([]uint64, []string, int64, error) {
var ids []uint64
var addresses []string
var value int64
@@ -1860,10 +1839,10 @@ func RetrieveAddressIDsByOutpoint(ctx context.Context, db *sql.DB, txHash string
// retrieveOldestTxBlockTime helps choose the most appropriate address page
// graph grouping to load by default depending on when the first transaction to
// the specific address was made.
-func retrieveOldestTxBlockTime(ctx context.Context, db *sql.DB, addr string) (blockTime dbtypes.TimeDef, err error) {
- err = db.QueryRowContext(ctx, internal.SelectAddressOldestTxBlockTime, addr).Scan(&blockTime)
- return
-}
+// func retrieveOldestTxBlockTime(ctx context.Context, db *sql.DB, addr string) (blockTime dbtypes.TimeDef, err error) {
+// err = db.QueryRowContext(ctx, internal.SelectAddressOldestTxBlockTime, addr).Scan(&blockTime)
+// return
+// }
// retrieveTxHistoryByType fetches the transaction types count for all the
// transactions associated with a given address for the given time interval.
@@ -1881,7 +1860,8 @@ func retrieveTxHistoryByType(ctx context.Context, db *sql.DB, addr, timeInterval
items := new(dbtypes.ChartsData)
for rows.Next() {
var blockTime time.Time
- var sentRtx, receivedRtx, tickets, votes, revokeTx uint64
+ var tickets, votes, revokeTx uint32
+ var sentRtx, receivedRtx uint64
err = rows.Scan(&blockTime, &sentRtx, &receivedRtx, &tickets, &votes, &revokeTx)
if err != nil {
return nil, err
@@ -1924,6 +1904,7 @@ func retrieveTxHistoryByAmountFlow(ctx context.Context, db *sql.DB, addr, timeIn
// the new/updated/conflicting row is returned. The updateOnConflict argument
// may be omitted, in which case an upsert will be favored over doing nothing, but
// only if checked=true.
+/*
func InsertVin(db *sql.DB, dbVin dbtypes.VinTxProperty, checked bool, updateOnConflict ...bool) (id uint64, err error) {
doUpsert := true
if len(updateOnConflict) > 0 {
@@ -1936,10 +1917,11 @@ func InsertVin(db *sql.DB, dbVin dbtypes.VinTxProperty, checked bool, updateOnCo
dbVin.TxType).Scan(&id)
return
}
+*/
-// InsertVinsStmt is like InsertVins, except that it takes a sql.Stmt. The
+// insertVinsStmt is like InsertVins, except that it takes a sql.Stmt. The
// caller is required to Close the transaction.
-func InsertVinsStmt(stmt *sql.Stmt, dbVins dbtypes.VinTxPropertyARRAY, checked bool, doUpsert bool) ([]uint64, error) {
+func insertVinsStmt(stmt *sql.Stmt, dbVins dbtypes.VinTxPropertyARRAY) ([]uint64, error) {
// TODO/Question: Should we skip inserting coinbase txns, which have same PrevTxHash?
ids := make([]uint64, 0, len(dbVins))
for _, vin := range dbVins {
@@ -1959,6 +1941,7 @@ func InsertVinsStmt(stmt *sql.Stmt, dbVins dbtypes.VinTxPropertyARRAY, checked b
// InsertVinsDbTxn is like InsertVins, except that it takes a sql.Tx. The caller
// is required to Commit or Rollback the transaction depending on the returned
// error value.
+/*
func InsertVinsDbTxn(dbTx *sql.Tx, dbVins dbtypes.VinTxPropertyARRAY, checked bool, doUpsert bool) ([]uint64, error) {
stmt, err := dbTx.Prepare(internal.MakeVinInsertStatement(checked, doUpsert))
if err != nil {
@@ -1977,8 +1960,10 @@ func InsertVinsDbTxn(dbTx *sql.Tx, dbVins dbtypes.VinTxPropertyARRAY, checked bo
}
return ids, nil
}
+*/
// InsertVins is like InsertVin, except that it operates on a slice of vin data.
+/*
func InsertVins(db *sql.DB, dbVins dbtypes.VinTxPropertyARRAY, checked bool, updateOnConflict ...bool) ([]uint64, error) {
dbtx, err := db.Begin()
if err != nil {
@@ -1998,6 +1983,7 @@ func InsertVins(db *sql.DB, dbVins dbtypes.VinTxPropertyARRAY, checked bool, upd
return ids, dbtx.Commit()
}
+*/
// InsertVout either inserts, attempts to insert, or upserts the given vout data
// into the vouts table. If checked=false, an unconditional insert is attempted,
@@ -2007,6 +1993,7 @@ func InsertVins(db *sql.DB, dbVins dbtypes.VinTxPropertyARRAY, checked bool, upd
// the new/updated/conflicting row is returned. The updateOnConflict argument
// may be omitted, in which case an upsert will be favored over doing nothing, but
// only if checked=true.
+/*
func InsertVout(db *sql.DB, dbVout *dbtypes.Vout, checked bool, updateOnConflict ...bool) (uint64, error) {
doUpsert := true
if len(updateOnConflict) > 0 {
@@ -2022,19 +2009,52 @@ func InsertVout(db *sql.DB, dbVout *dbtypes.Vout, checked bool, updateOnConflict
pq.Array(dbVout.ScriptPubKeyData.Addresses)).Scan(&id)
return id, err
}
+*/
+
+type addressList []string
+
+func (al addressList) Value() (driver.Value, error) {
+ switch len(al) {
+ case 0:
+ return "unknown", nil
+ case 1:
+ return al[0], nil
+ }
+
+ return pq.StringArray(al).Value()
+}
-// InsertVoutsStmt is like InsertVouts, except that it takes a sql.Stmt. The
+func (al *addressList) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case string:
+ switch src {
+ case "unknown", "{}":
+ *al = []string{}
+ return nil
+ }
+ if len(src) > 2 && src[0] == '{' && src[len(src)-1] == '}' {
+ *al = strings.Split(src, ",")
+ return nil
+ }
+ *al = []string{src}
+ return nil
+ default:
+ return errors.New("not an addressList")
+ }
+}
+
+// insertVoutsStmt is like InsertVouts, except that it takes a sql.Stmt. The
// caller is required to Close the statement.
-func InsertVoutsStmt(stmt *sql.Stmt, dbVouts []*dbtypes.Vout, checked bool, doUpsert bool) ([]uint64, []dbtypes.AddressRow, error) {
+func insertVoutsStmt(stmt *sql.Stmt, dbVouts []*dbtypes.Vout) ([]uint64, []dbtypes.AddressRow, error) {
addressRows := make([]dbtypes.AddressRow, 0, len(dbVouts)) // may grow with multisig
ids := make([]uint64, 0, len(dbVouts))
for _, vout := range dbVouts {
var id uint64
err := stmt.QueryRow(
vout.TxHash, vout.TxIndex, vout.TxTree, vout.Value, int32(vout.Version),
- vout.ScriptPubKey, int32(vout.ScriptPubKeyData.ReqSigs),
+ // vout.ScriptPubKey, int32(vout.ScriptPubKeyData.ReqSigs),
vout.ScriptPubKeyData.Type,
- pq.Array(vout.ScriptPubKeyData.Addresses), vout.Mixed).Scan(&id)
+ addressList(vout.ScriptPubKeyData.Addresses), vout.Mixed).Scan(&id)
if err != nil {
if err == sql.ErrNoRows {
continue
@@ -2063,6 +2083,7 @@ func InsertVoutsStmt(stmt *sql.Stmt, dbVouts []*dbtypes.Vout, checked bool, doUp
// InsertVoutsDbTxn is like InsertVouts, except that it takes a sql.Tx. The
// caller is required to Commit or Rollback the transaction depending on the
// returned error value.
+/*
func InsertVoutsDbTxn(dbTx *sql.Tx, dbVouts []*dbtypes.Vout, checked bool, doUpsert bool) ([]uint64, []dbtypes.AddressRow, error) {
stmt, err := dbTx.Prepare(internal.MakeVoutInsertStatement(checked, doUpsert))
if err != nil {
@@ -2080,9 +2101,11 @@ func InsertVoutsDbTxn(dbTx *sql.Tx, dbVouts []*dbtypes.Vout, checked bool, doUps
return ids, addressRows, stmt.Close()
}
+*/
// InsertVouts is like InsertVout, except that it operates on a slice of vout
// data.
+/*
func InsertVouts(db *sql.DB, dbVouts []*dbtypes.Vout, checked bool, updateOnConflict ...bool) ([]uint64, []dbtypes.AddressRow, error) {
// All inserts in atomic DB transaction
dbTx, err := db.Begin()
@@ -2103,33 +2126,38 @@ func InsertVouts(db *sql.DB, dbVouts []*dbtypes.Vout, checked bool, updateOnConf
return ids, addressRows, dbTx.Commit()
}
+*/
-func RetrievePkScriptByVinID(ctx context.Context, db *sql.DB, vinID uint64) (pkScript []byte, ver uint16, err error) {
- err = db.QueryRowContext(ctx, internal.SelectPkScriptByVinID, vinID).Scan(&ver, &pkScript)
- return
-}
+// func retrievePkScriptByVinID(ctx context.Context, db *sql.DB, vinID uint64) (pkScript []byte, ver uint16, err error) {
+// err = db.QueryRowContext(ctx, internal.SelectPkScriptByVinID, vinID).Scan(&ver, &pkScript)
+// return
+// }
-func RetrievePkScriptByVoutID(ctx context.Context, db *sql.DB, voutID uint64) (pkScript []byte, ver uint16, err error) {
+/*
+func retrievePkScriptByVoutID(ctx context.Context, db *sql.DB, voutID uint64) (pkScript []byte, ver uint16, err error) {
err = db.QueryRowContext(ctx, internal.SelectPkScriptByID, voutID).Scan(&ver, &pkScript)
return
}
-func RetrievePkScriptByOutpoint(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) (pkScript []byte, ver uint16, err error) {
+func retrievePkScriptByOutpoint(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) (pkScript []byte, ver uint16, err error) {
err = db.QueryRowContext(ctx, internal.SelectPkScriptByOutpoint, txHash, voutIndex).Scan(&ver, &pkScript)
return
}
-func RetrieveVoutIDByOutpoint(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) (id uint64, err error) {
+func retrieveVoutIDByOutpoint(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) (id uint64, err error) {
err = db.QueryRowContext(ctx, internal.SelectVoutIDByOutpoint, txHash, voutIndex).Scan(&id)
return
}
+*/
-func RetrieveVoutValue(ctx context.Context, db *sql.DB, txHash string, voutIndex uint32) (value uint64, err error) {
+// TEST ONLY REMOVE
+func retrieveVoutValue(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash, voutIndex uint32) (value uint64, err error) {
err = db.QueryRowContext(ctx, internal.RetrieveVoutValue, txHash, voutIndex).Scan(&value)
return
}
-func RetrieveVoutValues(ctx context.Context, db *sql.DB, txHash string) (values []uint64, txInds []uint32, txTrees []int8, err error) {
+// TEST ONLY REMOVE
+func retrieveVoutValues(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash) (values []uint64, txInds []uint32, txTrees []int8, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.RetrieveVoutValues, txHash)
if err != nil {
@@ -2155,10 +2183,11 @@ func RetrieveVoutValues(ctx context.Context, db *sql.DB, txHash string) (values
return
}
-// RetrieveAllVinDbIDs gets every row ID (the primary keys) for the vins table.
+// retrieveAllVinDbIDs gets every row ID (the primary keys) for the vins table.
// This function is used in UpdateSpendingInfoInAllAddresses, so it should not
// be subject to timeouts.
-func RetrieveAllVinDbIDs(db *sql.DB) (vinDbIDs []uint64, err error) {
+/* unused
+func retrieveAllVinDbIDs(db *sql.DB) (vinDbIDs []uint64, err error) {
var rows *sql.Rows
rows, err = db.Query(internal.SelectVinIDsALL)
if err != nil {
@@ -2179,76 +2208,85 @@ func RetrieveAllVinDbIDs(db *sql.DB) (vinDbIDs []uint64, err error) {
return
}
+*/
-// RetrieveFundingOutpointByTxIn gets the previous outpoint for a transaction
+// retrieveFundingOutpointByTxIn gets the previous outpoint for a transaction
// input specified by transaction hash and input index.
-func RetrieveFundingOutpointByTxIn(ctx context.Context, db *sql.DB, txHash string,
+/* unused
+func retrieveFundingOutpointByTxIn(ctx context.Context, db *sql.DB, txHash string,
vinIndex uint32) (id uint64, tx string, index uint32, tree int8, err error) {
err = db.QueryRowContext(ctx, internal.SelectFundingOutpointByTxIn, txHash, vinIndex).
Scan(&id, &tx, &index, &tree)
return
}
+*/
-// RetrieveFundingOutpointByVinID gets the previous outpoint for a transaction
+// retrieveFundingOutpointByVinID gets the previous outpoint for a transaction
// input specified by row ID in the vins table.
-func RetrieveFundingOutpointByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string, index uint32, tree int8, err error) {
+/* unused
+func retrieveFundingOutpointByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string, index uint32, tree int8, err error) {
err = db.QueryRowContext(ctx, internal.SelectFundingOutpointByVinID, vinDbID).
Scan(&tx, &index, &tree)
return
}
+*/
-// RetrieveFundingOutpointIndxByVinID gets the transaction output index of the
+// retrieveFundingOutpointIndxByVinID gets the transaction output index of the
// previous outpoint for a transaction input specified by row ID in the vins
// table.
-func RetrieveFundingOutpointIndxByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (idx uint32, err error) {
+func retrieveFundingOutpointIndxByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (idx uint32, err error) {
err = db.QueryRowContext(ctx, internal.SelectFundingOutpointIndxByVinID, vinDbID).Scan(&idx)
return
}
-// RetrieveFundingTxByTxIn gets the transaction hash of the previous outpoint
+// retrieveFundingTxByTxIn gets the transaction hash of the previous outpoint
// for a transaction input specified by hash and input index.
-func RetrieveFundingTxByTxIn(ctx context.Context, db *sql.DB, txHash string, vinIndex uint32) (id uint64, tx string, err error) {
+/* unused
+func retrieveFundingTxByTxIn(ctx context.Context, db *sql.DB, txHash string, vinIndex uint32) (id uint64, tx string, err error) {
err = db.QueryRowContext(ctx, internal.SelectFundingTxByTxIn, txHash, vinIndex).
Scan(&id, &tx)
return
}
+*/
-// RetrieveFundingTxByVinDbID gets the transaction hash of the previous outpoint
+// retrieveFundingTxByVinDbID gets the transaction hash of the previous outpoint
// for a transaction input specified by row ID in the vins table. This function
// is used only in UpdateSpendingInfoInAllTickets, so it should not be subject
// to timeouts.
-func RetrieveFundingTxByVinDbID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string, err error) {
- err = db.QueryRowContext(ctx, internal.SelectFundingTxByVinID, vinDbID).Scan(&tx)
- return
-}
+// func retrieveFundingTxByVinDbID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string, err error) {
+// err = db.QueryRowContext(ctx, internal.SelectFundingTxByVinID, vinDbID).Scan(&tx)
+// return
+// }
-// RetrieveSpendingTxByVinID gets the spending transaction input (hash, vin
+// retrieveSpendingTxByVinID gets the spending transaction input (hash, vin
// number, and tx tree) for the transaction input specified by row ID in the
// vins table.
-func RetrieveSpendingTxByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string,
+/* unused
+func retrieveSpendingTxByVinID(ctx context.Context, db *sql.DB, vinDbID uint64) (tx string,
vinIndex uint32, tree int8, err error) {
err = db.QueryRowContext(ctx, internal.SelectSpendingTxByVinID, vinDbID).
Scan(&tx, &vinIndex, &tree)
return
}
+*/
-// RetrieveSpendingTxByTxOut gets any spending transaction input info for a
+// retrieveSpendingTxByTxOut gets any spending transaction input info for a
// previous outpoint specified by funding transaction hash and vout number. This
// function is called by SpendingTransaction, an important part of the address
// page loading.
-func RetrieveSpendingTxByTxOut(ctx context.Context, db *sql.DB, txHash string,
- voutIndex uint32) (id uint64, tx string, vin uint32, tree int8, err error) {
+func retrieveSpendingTxByTxOut(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash,
+ voutIndex uint32) (id uint64, tx dbtypes.ChainHash, vin uint32, err error) {
err = db.QueryRowContext(ctx, internal.SelectSpendingTxByPrevOut,
- txHash, voutIndex).Scan(&id, &tx, &vin, &tree)
+ txHash, voutIndex).Scan(&id, &tx, &vin)
return
}
-// RetrieveSpendingTxsByFundingTx gets info on all spending transaction inputs
+// retrieveSpendingTxsByFundingTx gets info on all spending transaction inputs
// for the given funding transaction specified by DB row ID. This function is
// called by SpendingTransactions, an important part of the transaction page
// loading, among other functions..
-func RetrieveSpendingTxsByFundingTx(ctx context.Context, db *sql.DB, fundingTxID string) (dbIDs []uint64,
- txns []string, vinInds []uint32, voutInds []uint32, err error) {
+func retrieveSpendingTxsByFundingTx(ctx context.Context, db *sql.DB, fundingTxID dbtypes.ChainHash) (dbIDs []uint64,
+ txns []dbtypes.ChainHash, vinInds []uint32, voutInds []uint32, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectSpendingTxsByPrevTx, fundingTxID)
if err != nil {
@@ -2258,15 +2296,15 @@ func RetrieveSpendingTxsByFundingTx(ctx context.Context, db *sql.DB, fundingTxID
for rows.Next() {
var id uint64
- var tx string
+ var txHash dbtypes.ChainHash
var vin, vout uint32
- err = rows.Scan(&id, &tx, &vin, &vout)
+ err = rows.Scan(&id, &txHash, &vin, &vout)
if err != nil {
return
}
dbIDs = append(dbIDs, id)
- txns = append(txns, tx)
+ txns = append(txns, txHash)
vinInds = append(vinInds, vin)
voutInds = append(voutInds, vout)
}
@@ -2275,10 +2313,10 @@ func RetrieveSpendingTxsByFundingTx(ctx context.Context, db *sql.DB, fundingTxID
return
}
-// RetrieveSpendingTxsByFundingTxWithBlockHeight will retrieve all transactions,
+// retrieveSpendingTxsByFundingTxWithBlockHeight will retrieve all transactions,
// indexes and block heights funded by a specific transaction. This function is
// used by the DCR to Insight transaction converter.
-func RetrieveSpendingTxsByFundingTxWithBlockHeight(ctx context.Context, db *sql.DB, fundingTxID string) (aSpendByFunHash []*apitypes.SpendByFundingHash, err error) {
+func retrieveSpendingTxsByFundingTxWithBlockHeight(ctx context.Context, db *sql.DB, fundingTxID dbtypes.ChainHash) (aSpendByFunHash []*apitypes.SpendByFundingHash, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectSpendingTxsByPrevTxWithBlockHeight, fundingTxID)
if err != nil {
@@ -2301,8 +2339,9 @@ func RetrieveSpendingTxsByFundingTxWithBlockHeight(ctx context.Context, db *sql.
return
}
-// RetrieveVinByID gets from the vins table for the provided row ID.
-func RetrieveVinByID(ctx context.Context, db *sql.DB, vinDbID uint64) (prevOutHash string, prevOutVoutInd uint32,
+// retrieveVinByID gets from the vins table for the provided row ID.
+/* unused
+func retrieveVinByID(ctx context.Context, db *sql.DB, vinDbID uint64) (prevOutHash string, prevOutVoutInd uint32,
prevOutTree int8, txHash string, txVinInd uint32, txTree int8, valueIn int64, err error) {
var blockTime dbtypes.TimeDef
var isValid, isMainchain bool
@@ -2312,11 +2351,12 @@ func RetrieveVinByID(ctx context.Context, db *sql.DB, vinDbID uint64) (prevOutHa
&prevOutHash, &prevOutVoutInd, &prevOutTree, &valueIn, &txType)
return
}
+*/
-// RetrieveVinsByIDs retrieves vin details for the rows of the vins table
+// retrieveVinsByIDs retrieves vin details for the rows of the vins table
// specified by the provided row IDs. This function is an important part of the
// transaction page.
-func RetrieveVinsByIDs(ctx context.Context, db *sql.DB, vinDbIDs []uint64) ([]dbtypes.VinTxProperty, error) {
+func retrieveVinsByIDs(ctx context.Context, db *sql.DB, vinDbIDs []uint64) ([]dbtypes.VinTxProperty, error) {
vins := make([]dbtypes.VinTxProperty, len(vinDbIDs))
for i, id := range vinDbIDs {
vin := &vins[i]
@@ -2331,52 +2371,52 @@ func RetrieveVinsByIDs(ctx context.Context, db *sql.DB, vinDbIDs []uint64) ([]db
return vins, nil
}
-// RetrieveVoutsByIDs retrieves vout details for the rows of the vouts table
+// retrieveVoutsByIDs retrieves vout details for the rows of the vouts table
// specified by the provided row IDs. This function is an important part of the
// transaction page.
-func RetrieveVoutsByIDs(ctx context.Context, db *sql.DB, voutDbIDs []uint64) ([]dbtypes.Vout, error) {
+func retrieveVoutsByIDs(ctx context.Context, db *sql.DB, voutDbIDs []uint64) ([]dbtypes.Vout, error) {
vouts := make([]dbtypes.Vout, len(voutDbIDs))
for i, id := range voutDbIDs {
vout := &vouts[i]
var id0 uint64
var spendTxRowID sql.NullInt64 // discarded, but can be NULL
- var reqSigs uint32
- var addresses string
+ // var reqSigs uint32
+ var addresses addressList
var scriptClass dbtypes.ScriptClass // or scan a string and then dbtypes.NewScriptClassFromString(scriptTypeString)
err := db.QueryRowContext(ctx, internal.SelectVoutByID, id).Scan(&id0, &vout.TxHash,
&vout.TxIndex, &vout.TxTree, &vout.Value, &vout.Version,
- &vout.ScriptPubKey, &reqSigs, &scriptClass, &addresses, &vout.Mixed, &spendTxRowID)
+ /* &vout.ScriptPubKey, &reqSigs, */ &scriptClass, &addresses, &vout.Mixed, &spendTxRowID)
if err != nil {
return nil, err
}
// Parse the addresses array
- replacer := strings.NewReplacer("{", "", "}", "")
- addresses = replacer.Replace(addresses)
+ // replacer := strings.NewReplacer("{", "", "}", "")
+ // addresses = replacer.Replace(addresses)
- vout.ScriptPubKeyData.ReqSigs = reqSigs
+ // vout.ScriptPubKeyData.ReqSigs = reqSigs
vout.ScriptPubKeyData.Type = scriptClass
// If there are no addresses, the Addresses should be nil or length
// zero. However, strings.Split will return [""] if addresses is "".
// If that is the case, leave it as a nil slice.
if len(addresses) > 0 {
- vout.ScriptPubKeyData.Addresses = strings.Split(addresses, ",")
+ vout.ScriptPubKeyData.Addresses = addresses // strings.Split(addresses, ",")
}
}
return vouts, nil
}
-func RetrieveUTXOsByVinsJoin(ctx context.Context, db *sql.DB) ([]dbtypes.UTXO, error) {
- return retrieveUTXOs(ctx, db, internal.SelectUTXOsViaVinsMatch)
-}
+// func retrieveUTXOsByVinsJoin(ctx context.Context, db *sql.DB) ([]dbtypes.UTXO, error) {
+// return retrieveUTXOsStmt(ctx, db, internal.SelectUTXOsViaVinsMatch)
+// }
-// RetrieveUTXOs gets the entire UTXO set from the vouts and vins tables.
-func RetrieveUTXOs(ctx context.Context, db *sql.DB) ([]dbtypes.UTXO, error) {
- return retrieveUTXOs(ctx, db, internal.SelectUTXOs)
+// retrieveUTXOs gets the entire UTXO set from the vouts and vins tables.
+func retrieveUTXOs(ctx context.Context, db *sql.DB) ([]dbtypes.UTXO, error) {
+ return retrieveUTXOsStmt(ctx, db, internal.SelectUTXOs)
}
-// retrieveUTXOs gets the entire UTXO set from the vouts and vins tables.
-func retrieveUTXOs(ctx context.Context, db *sql.DB, stmt string) ([]dbtypes.UTXO, error) {
- _, height, err := DBBestBlock(ctx, db)
+// retrieveUTXOsStmt gets the entire UTXO set from the vouts and vins tables.
+func retrieveUTXOsStmt(ctx context.Context, db *sql.DB, stmt string) ([]dbtypes.UTXO, error) {
+ _, height, err := dbBestBlock(ctx, db)
if err != nil {
return nil, err
}
@@ -2426,6 +2466,7 @@ func retrieveUTXOs(ctx context.Context, db *sql.DB, stmt string) ([]dbtypes.UTXO
// SetSpendingForVinDbIDs updates rows of the addresses table with spending
// information from the rows of the vins table specified by vinDbIDs. This does
// not insert the spending transaction into the addresses table.
+/*
func SetSpendingForVinDbIDs(db *sql.DB, vinDbIDs []uint64) ([]int64, int64, error) {
// Get funding details for vin and set them in the address table.
dbtx, err := db.Begin()
@@ -2484,10 +2525,12 @@ func SetSpendingForVinDbIDs(db *sql.DB, vinDbIDs []uint64) ([]int64, int64, erro
return addressRowsUpdated, totalUpdated, dbtx.Commit()
}
+*/
// SetSpendingForVinDbID updates rows of the addresses table with spending
// information from the row of the vins table specified by vinDbID. This does
// not insert the spending transaction into the addresses table.
+/*
func SetSpendingForVinDbID(db *sql.DB, vinDbID uint64) (int64, error) {
// Get funding details for the vin and set them in the address table.
dbtx, err := db.Begin()
@@ -2521,15 +2564,16 @@ func SetSpendingForVinDbID(db *sql.DB, vinDbID uint64) (int64, error) {
return N, dbtx.Commit()
}
+*/
-// SetSpendingForFundingOP updates funding rows of the addresses table with the
+// setSpendingForFundingOP updates funding rows of the addresses table with the
// provided spending transaction output info. Only update rows of mainchain or
// side chain transactions according to forMainchain. Technically
// forMainchain=false also permits updating rows that are stake invalidated, but
// consensus-validated transactions cannot spend outputs from stake-invalidated
// transactions so the funding tx must not be invalid.
-func SetSpendingForFundingOP(db SqlExecutor, fundingTxHash string, fundingTxVoutIndex uint32,
- spendingTxHash string, forMainchain bool) (int64, error) {
+func setSpendingForFundingOP(db SqlExecutor, fundingTxHash dbtypes.ChainHash, fundingTxVoutIndex uint32,
+ spendingTxHash dbtypes.ChainHash, forMainchain bool) (int64, error) {
// Update the matchingTxHash for the funding tx output. matchingTxHash here
// is the hash of the funding tx.
res, err := db.Exec(internal.SetAddressMatchingTxHashForOutpoint,
@@ -2588,6 +2632,7 @@ func resetSpendingForVoutsByTxRowID(tx *sql.Tx, spendingTxRowIDs []int64) (int64
// InsertSpendingAddressRow inserts a new spending tx row, and updates any
// corresponding funding tx row.
+/*
func InsertSpendingAddressRow(db *sql.DB, fundingTxHash string, fundingTxVoutIndex uint32, fundingTxTree int8,
spendingTxHash string, spendingTxVinIndex uint32, vinDbID uint64, utxoData *dbtypes.UTXOData,
checked, updateExisting, mainchain, valid bool, txType int16, updateFundingRow bool,
@@ -2608,25 +2653,9 @@ func InsertSpendingAddressRow(db *sql.DB, fundingTxHash string, fundingTxVoutInd
return fromAddrs, c, voutDbID, mixedOut, dbtx.Commit()
}
+*/
-func updateSpendTxInfoInAllVouts(db SqlExecutor) (int64, error) {
- // Set vouts.spend_tx_row_id using vouts.tx_hash, vins.prev_tx_hash, and
- // transactions.tx_hash.
- res, err := db.Exec(`UPDATE vouts SET spend_tx_row_id = transactions.id
- FROM vins, transactions
- WHERE vouts.tx_hash=vins.prev_tx_hash
- AND vouts.tx_index=vins.prev_tx_index
- AND vouts.value > 0
- AND vins.is_mainchain=TRUE
- AND transactions.tx_hash=vins.tx_hash
- AND transactions.is_valid;`) // transactions.tree=1 implies transactions.is_valid=true
- if err != nil {
- return 0, fmt.Errorf("UPDATE vouts.spend_tx_row_id error: %w", err)
- }
- return res.RowsAffected()
-}
-
-func retrieveTxOutData(tx SqlQueryer, txid string, idx uint32, tree int8) (*dbtypes.UTXOData, error) {
+func retrieveTxOutData(tx SqlQueryer, txid dbtypes.ChainHash, idx uint32, tree int8) (*dbtypes.UTXOData, error) {
var data dbtypes.UTXOData
var addrArray string
err := tx.QueryRow(internal.SelectVoutAddressesByTxOut, txid, idx, tree).
@@ -2646,8 +2675,8 @@ func retrieveTxOutData(tx SqlQueryer, txid string, idx uint32, tree int8) (*dbty
// insertSpendingAddressRow inserts a new row in the addresses table for a new
// transaction input, and updates the spending information for the addresses
// table row and vouts table row corresponding to the previous outpoint.
-func insertSpendingAddressRow(tx *sql.Tx, fundingTxHash string, fundingTxVoutIndex uint32,
- fundingTxTree int8, spendingTxHash string, spendingTxVinIndex uint32, vinDbID uint64,
+func insertSpendingAddressRow(tx *sql.Tx, fundingTxHash dbtypes.ChainHash, fundingTxVoutIndex uint32,
+ fundingTxTree int8, spendingTxHash dbtypes.ChainHash, spendingTxVinIndex uint32, vinDbID uint64,
spentUtxoData *dbtypes.UTXOData, checked, updateExisting, mainchain, valid bool, txType int16,
updateFundingRow bool, blockT ...dbtypes.TimeDef) ([]string, int64, int64, bool, error) {
@@ -2707,7 +2736,7 @@ func insertSpendingAddressRow(tx *sql.Tx, fundingTxHash string, fundingTxVoutInd
// the spending transaction is side chain, so must be the funding tx to
// update it. (Similarly for mainchain, but a mainchain block always has
// a parent on the main chain).
- N, err := SetSpendingForFundingOP(tx, fundingTxHash, fundingTxVoutIndex,
+ N, err := setSpendingForFundingOP(tx, fundingTxHash, fundingTxVoutIndex,
spendingTxHash, mainchain)
return addrs, N, voutDbID, mixed, err
}
@@ -2797,13 +2826,13 @@ func retrieveTotalAgendaVotesCount(ctx context.Context, db *sql.DB, agendaID str
// --- atomic swap tables
-func InsertSwap(db SqlExecutor, spendHeight int64, swapInfo *txhelpers.AtomicSwapData) error {
+func insertSwap(db SqlExecutor, spendHeight int64, swapInfo *txhelpers.AtomicSwapData) error {
var secret interface{} // only nil interface stores a NULL, not even nil slice
if len(swapInfo.Secret) > 0 {
secret = swapInfo.Secret
}
- _, err := db.Exec(internal.InsertContractSpend, swapInfo.ContractTx.String(), swapInfo.ContractVout,
- swapInfo.SpendTx.String(), swapInfo.SpendVin, spendHeight,
+ _, err := db.Exec(internal.InsertContractSpend, (*dbtypes.ChainHash)(swapInfo.ContractTx), swapInfo.ContractVout,
+ (*dbtypes.ChainHash)(swapInfo.SpendTx), swapInfo.SpendVin, spendHeight,
swapInfo.ContractAddress, swapInfo.Value,
swapInfo.SecretHash[:], secret, swapInfo.Locktime)
return err
@@ -2811,6 +2840,7 @@ func InsertSwap(db SqlExecutor, spendHeight int64, swapInfo *txhelpers.AtomicSwa
// --- transactions table ---
+/*
func InsertTx(db *sql.DB, dbTx *dbtypes.Tx, checked, updateExistingRecords bool) (uint64, error) {
insertStatement := internal.MakeTxInsertStatement(checked, updateExistingRecords)
var id uint64
@@ -2824,13 +2854,14 @@ func InsertTx(db *sql.DB, dbTx *dbtypes.Tx, checked, updateExistingRecords bool)
dbTx.IsValid, dbTx.IsMainchainBlock).Scan(&id)
return id, err
}
+*/
-func InsertTxnsStmt(stmt *sql.Stmt, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) ([]uint64, error) {
+func insertTxnsStmt(stmt *sql.Stmt, dbTxns []*dbtypes.Tx) ([]uint64, error) {
ids := make([]uint64, 0, len(dbTxns))
for _, tx := range dbTxns {
var id uint64
err := stmt.QueryRow(
- tx.BlockHash, tx.BlockHeight, tx.BlockTime, tx.Time,
+ tx.BlockHash, tx.BlockHeight, tx.BlockTime,
tx.TxType, int16(tx.Version), tx.Tree, tx.TxID, tx.BlockIndex,
int32(tx.Locktime), int32(tx.Expiry), tx.Size, tx.Spent, tx.Sent, tx.Fees,
tx.MixCount, tx.MixDenom,
@@ -2848,13 +2879,13 @@ func InsertTxnsStmt(stmt *sql.Stmt, dbTxns []*dbtypes.Tx, checked, updateExistin
return ids, nil
}
-func InsertTxnsDbTxn(dbTx *sql.Tx, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) ([]uint64, error) {
+func insertTxnsDbTxn(dbTx *sql.Tx, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) ([]uint64, error) {
stmt, err := dbTx.Prepare(internal.MakeTxInsertStatement(checked, updateExistingRecords))
if err != nil {
return nil, err
}
- ids, err := InsertTxnsStmt(stmt, dbTxns, checked, updateExistingRecords)
+ ids, err := insertTxnsStmt(stmt, dbTxns)
// Try to close the statement even if the inserts failed.
errClose := stmt.Close()
if err != nil {
@@ -2866,6 +2897,7 @@ func InsertTxnsDbTxn(dbTx *sql.Tx, dbTxns []*dbtypes.Tx, checked, updateExisting
return ids, nil
}
+/*
func InsertTxns(db *sql.DB, dbTxns []*dbtypes.Tx, checked, updateExistingRecords bool) ([]uint64, error) {
dbtx, err := db.Begin()
if err != nil {
@@ -2908,17 +2940,18 @@ func InsertTxns(db *sql.DB, dbTxns []*dbtypes.Tx, checked, updateExistingRecords
return ids, dbtx.Commit()
}
+*/
-// RetrieveDbTxByHash retrieves a row of the transactions table corresponding to
+// retrieveDbTxByHash retrieves a row of the transactions table corresponding to
// the given transaction hash. Stake-validated transactions in mainchain blocks
// are chosen first. This function is used by FillAddressTransactions, an
// important component of the addresses page.
-func RetrieveDbTxByHash(ctx context.Context, db *sql.DB, txHash string) (id uint64, dbTx *dbtypes.Tx, err error) {
+func retrieveDbTxByHash(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash) (id uint64, dbTx *dbtypes.Tx, err error) {
dbTx = new(dbtypes.Tx)
vinDbIDs := dbtypes.UInt64Array(dbTx.VinDbIds)
voutDbIDs := dbtypes.UInt64Array(dbTx.VoutDbIds)
err = db.QueryRowContext(ctx, internal.SelectFullTxByHash, txHash).Scan(&id,
- &dbTx.BlockHash, &dbTx.BlockHeight, &dbTx.BlockTime, &dbTx.Time,
+ &dbTx.BlockHash, &dbTx.BlockHeight, &dbTx.BlockTime,
&dbTx.TxType, &dbTx.Version, &dbTx.Tree, &dbTx.TxID, &dbTx.BlockIndex,
&dbTx.Locktime, &dbTx.Expiry, &dbTx.Size, &dbTx.Spent, &dbTx.Sent,
&dbTx.Fees, &dbTx.MixCount, &dbTx.MixDenom, &dbTx.NumVin, &vinDbIDs,
@@ -2928,10 +2961,11 @@ func RetrieveDbTxByHash(ctx context.Context, db *sql.DB, txHash string) (id uint
return
}
-// RetrieveFullTxByHash gets all data from the transactions table for the
+// retrieveFullTxByHash gets all data from the transactions table for the
// transaction specified by its hash. Transactions in valid and mainchain blocks
-// are chosen first. See also RetrieveDbTxByHash.
-func RetrieveFullTxByHash(ctx context.Context, db *sql.DB, txHash string) (id uint64,
+// are chosen first. See also retrieveDbTxByHash.
+/*
+func retrieveFullTxByHash(ctx context.Context, db *sql.DB, txHash string) (id uint64,
blockHash string, blockHeight int64, blockTime, timeVal dbtypes.TimeDef,
txType int16, version int32, tree int8, blockInd uint32,
lockTime, expiry int32, size uint32, spent, sent, fees int64,
@@ -2946,11 +2980,12 @@ func RetrieveFullTxByHash(ctx context.Context, db *sql.DB, txHash string) (id ui
&isValidBlock, &isMainchainBlock)
return
}
+*/
-// RetrieveDbTxsByHash retrieves all the rows of the transactions table,
+// retrieveDbTxsByHash retrieves all the rows of the transactions table,
// including the primary keys/ids, for the given transaction hash. This function
// is used by the transaction page via ChainDB.Transaction.
-func RetrieveDbTxsByHash(ctx context.Context, db *sql.DB, txHash string) (ids []uint64, dbTxs []*dbtypes.Tx, err error) {
+func retrieveDbTxsByHash(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash) (ids []uint64, dbTxs []*dbtypes.Tx, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectFullTxsByHash, txHash)
if err != nil {
@@ -2966,7 +3001,7 @@ func RetrieveDbTxsByHash(ctx context.Context, db *sql.DB, txHash string) (ids []
// voutDbIDs := dbtypes.UInt64Array(dbTx.VoutDbIds)
err = rows.Scan(&id,
- &dbTx.BlockHash, &dbTx.BlockHeight, &dbTx.BlockTime, &dbTx.Time,
+ &dbTx.BlockHash, &dbTx.BlockHeight, &dbTx.BlockTime,
&dbTx.TxType, &dbTx.Version, &dbTx.Tree, &dbTx.TxID, &dbTx.BlockIndex,
&dbTx.Locktime, &dbTx.Expiry, &dbTx.Size, &dbTx.Spent, &dbTx.Sent,
&dbTx.Fees, &dbTx.MixCount, &dbTx.MixDenom, &dbTx.NumVin, &vinids,
@@ -2986,11 +3021,12 @@ func RetrieveDbTxsByHash(ctx context.Context, db *sql.DB, txHash string) (ids []
return
}
-// RetrieveTxnsVinsByBlock retrieves for all the transactions in the specified
+// retrieveTxnsVinsByBlock retrieves for all the transactions in the specified
// block the vin_db_ids arrays, is_valid, and is_mainchain. This function is
// used by handleVinsTableMainchainupgrade, so it should not be subject to
// timeouts.
-func RetrieveTxnsVinsByBlock(ctx context.Context, db *sql.DB, blockHash string) (vinDbIDs []dbtypes.UInt64Array,
+/*
+func retrieveTxnsVinsByBlock(ctx context.Context, db *sql.DB, blockHash string) (vinDbIDs []dbtypes.UInt64Array,
areValid []bool, areMainchain []bool, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectTxnsVinsByBlock, blockHash)
@@ -3015,12 +3051,13 @@ func RetrieveTxnsVinsByBlock(ctx context.Context, db *sql.DB, blockHash string)
return
}
+*/
-// RetrieveTxnsVinsVoutsByBlock retrieves for all the transactions in the
+// retrieveTxnsVinsVoutsByBlock retrieves for all the transactions in the
// specified block the vin_db_ids and vout_db_ids arrays. This function is used
// only by UpdateLastAddressesValid and other setting functions, where it should
// not be subject to a timeout.
-func RetrieveTxnsVinsVoutsByBlock(ctx context.Context, db *sql.DB, blockHash string, onlyRegular bool) (vinDbIDs, voutDbIDs []dbtypes.UInt64Array,
+func retrieveTxnsVinsVoutsByBlock(ctx context.Context, db *sql.DB, blockHash dbtypes.ChainHash, onlyRegular bool) (vinDbIDs, voutDbIDs []dbtypes.UInt64Array,
areMainchain []bool, err error) {
stmt := internal.SelectTxnsVinsVoutsByBlock
if onlyRegular {
@@ -3051,21 +3088,23 @@ func RetrieveTxnsVinsVoutsByBlock(ctx context.Context, db *sql.DB, blockHash str
return
}
-func RetrieveTxByHash(ctx context.Context, db *sql.DB, txHash string) (id uint64, blockHash string,
+func retrieveTxByHash(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash) (id uint64, blockHash dbtypes.ChainHash,
blockInd uint32, tree int8, err error) {
err = db.QueryRowContext(ctx, internal.SelectTxByHash, txHash).Scan(&id, &blockHash, &blockInd, &tree)
return
}
-func RetrieveTxBlockTimeByHash(ctx context.Context, db *sql.DB, txHash string) (blockTime dbtypes.TimeDef, err error) {
+/*
+func retrieveTxBlockTimeByHash(ctx context.Context, db *sql.DB, txHash string) (blockTime dbtypes.TimeDef, err error) {
err = db.QueryRowContext(ctx, internal.SelectTxBlockTimeByHash, txHash).Scan(&blockTime)
return
}
+*/
-// RetrieveTxsByBlockHash retrieves all transactions in a given block. This is
+// retrieveTxsByBlockHash retrieves all transactions in a given block. This is
// used by update functions, so care should be taken to not timeout in these
// cases.
-func RetrieveTxsByBlockHash(ctx context.Context, db *sql.DB, blockHash string) (ids []uint64, txs []string,
+func retrieveTxsByBlockHash(ctx context.Context, db *sql.DB, blockHash dbtypes.ChainHash) (txs []dbtypes.ChainHash,
blockInds []uint32, trees []int8, blockTimes []dbtypes.TimeDef, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectTxsByBlockHash, blockHash)
@@ -3075,17 +3114,15 @@ func RetrieveTxsByBlockHash(ctx context.Context, db *sql.DB, blockHash string) (
defer closeRows(rows)
for rows.Next() {
- var id uint64
var blockTime dbtypes.TimeDef
- var tx string
+ var tx dbtypes.ChainHash
var bind uint32
var tree int8
- err = rows.Scan(&id, &tx, &bind, &tree, &blockTime)
+ err = rows.Scan(&tx, &bind, &tree, &blockTime)
if err != nil {
return
}
- ids = append(ids, id)
txs = append(txs, tx)
blockInds = append(blockInds, bind)
trees = append(trees, tree)
@@ -3096,10 +3133,10 @@ func RetrieveTxsByBlockHash(ctx context.Context, db *sql.DB, blockHash string) (
return
}
-// RetrieveTxnsBlocks retrieves for the specified transaction hash the following
+// retrieveTxnsBlocks retrieves for the specified transaction hash the following
// data for each block containing the transactions: block_hash, block_index,
// is_valid, is_mainchain.
-func RetrieveTxnsBlocks(ctx context.Context, db *sql.DB, txHash string) (blockHashes []string,
+func retrieveTxnsBlocks(ctx context.Context, db *sql.DB, txHash dbtypes.ChainHash) (blockHashes []dbtypes.ChainHash,
blockHeights, blockIndexes []uint32, areValid, areMainchain []bool, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectTxsBlocks, txHash)
@@ -3109,7 +3146,7 @@ func RetrieveTxnsBlocks(ctx context.Context, db *sql.DB, txHash string) (blockHa
defer closeRows(rows)
for rows.Next() {
- var hash string
+ var hash dbtypes.ChainHash
var height, idx uint32
var isValid, isMainchain bool
err = rows.Scan(&height, &hash, &idx, &isValid, &isMainchain)
@@ -3155,7 +3192,7 @@ func appendChartBlocks(charts *cache.ChartData, rows *sql.Rows) error {
}
var timeDef dbtypes.TimeDef
- var workhex string
+ var workhex string // []byte
var count, size, height uint64
var rowCount int32
blocks := charts.Blocks
@@ -3167,6 +3204,7 @@ func appendChartBlocks(charts *cache.ChartData, rows *sql.Rows) error {
return err
}
+ // bigwork := new(big.Int).SetBytes(work)
bigwork, ok := new(big.Int).SetString(workhex, 16)
if !ok {
badRow()
@@ -3520,6 +3558,7 @@ func retrievePowerlessTickets(ctx context.Context, db *sql.DB) (*apitypes.Powerl
}
// retrieveTxPerDay fetches data for tx-per-day chart from the blocks table.
+/*
func retrieveTxPerDay(ctx context.Context, db *sql.DB, timeArr []dbtypes.TimeDef,
txCountArr []uint64) ([]dbtypes.TimeDef, []uint64, error) {
var since time.Time
@@ -3553,90 +3592,47 @@ func retrieveTxPerDay(ctx context.Context, db *sql.DB, timeArr []dbtypes.TimeDef
return timeArr, txCountArr, err
}
-
-// retrieveTicketByOutputCount fetches the data for ticket-by-outputs-windows
-// chart if outputCountType outputCountByTicketPoolWindow is passed and
-// ticket-by-outputs-blocks if outputCountType outputCountByAllBlocks is passed.
-func retrieveTicketByOutputCount(ctx context.Context, db *sql.DB, interval int64,
- dataType outputCountType, heightArr, soloArr, pooledArr []uint64) ([]uint64,
- []uint64, []uint64, error) {
- var since uint64
-
- if c := len(heightArr); c > 0 {
- since = heightArr[c-1]
-
- // drop the last entry to avoid duplication.
- if dataType == outputCountByTicketPoolWindow {
- heightArr = heightArr[:c-1]
- soloArr = soloArr[:c-1]
- pooledArr = pooledArr[:c-1]
- }
- }
-
- var query string
- var args []interface{}
- switch dataType {
- case outputCountByAllBlocks:
- query = internal.SelectTicketsOutputCountByAllBlocks
- args = []interface{}{stake.TxTypeSStx, since}
-
- case outputCountByTicketPoolWindow:
- query = internal.SelectTicketsOutputCountByTPWindow
- since = since * uint64(interval)
- args = []interface{}{stake.TxTypeSStx, since, interval}
-
- default:
- return heightArr, soloArr, pooledArr,
- fmt.Errorf("unknown output count type '%v'", dataType)
- }
-
- rows, err := db.QueryContext(ctx, query, args...)
- if err != nil {
- return heightArr, soloArr, pooledArr, err
- }
-
- defer closeRows(rows)
-
- for rows.Next() {
- var height, solo, pooled uint64
- if err = rows.Scan(&height, &solo, &pooled); err != nil {
- return heightArr, soloArr, pooledArr, err
- }
-
- heightArr = append(heightArr, height)
- soloArr = append(soloArr, solo)
- pooledArr = append(pooledArr, pooled)
- }
- err = rows.Err()
-
- return heightArr, soloArr, pooledArr, err
-}
+*/
// --- blocks and block_chain tables ---
-// InsertBlock inserts the specified dbtypes.Block as with the given
+// insertBlock inserts the specified dbtypes.Block as with the given
// valid/mainchain status. If checked is true, an upsert statement is used so
// that a unique constraint violation will result in an update instead of
// attempting to insert a duplicate row. If checked is false and there is a
// duplicate row, an error will be returned.
-func InsertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, checked bool) (uint64, error) {
+func insertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, checked bool) (uint64, error) {
insertStatement := internal.BlockInsertStatement(checked)
var id uint64
err := db.QueryRow(insertStatement,
dbBlock.Hash, dbBlock.Height, dbBlock.Size, isValid, isMainchain,
int32(dbBlock.Version), dbBlock.NumTx, dbBlock.NumRegTx,
- pq.Array(dbBlock.Tx), pq.Array(dbBlock.TxDbIDs),
- dbBlock.NumStakeTx, pq.Array(dbBlock.STx), pq.Array(dbBlock.STxDbIDs),
+ dbtypes.UInt64Array(dbBlock.TxDbIDs), dbBlock.NumStakeTx, dbtypes.UInt64Array(dbBlock.STxDbIDs),
dbBlock.Time, int64(dbBlock.Nonce), int16(dbBlock.VoteBits), dbBlock.Voters,
dbBlock.FreshStake, dbBlock.Revocations, dbBlock.PoolSize, int64(dbBlock.Bits),
int64(dbBlock.SBits), dbBlock.Difficulty, int32(dbBlock.StakeVersion),
- dbBlock.PreviousHash, dbBlock.ChainWork, pq.Array(dbBlock.Winners)).Scan(&id)
+ dbBlock.PreviousHash, dbBlock.ChainWork, dbtypes.ChainHashArray(dbBlock.Winners)).Scan(&id)
return id, err
}
-// InsertBlockPrevNext inserts a new row of the block_chain table.
-func InsertBlockPrevNext(db *sql.DB, blockDbID uint64,
- hash, prev, next string) error {
+// func insertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, checked bool) (uint64, error) {
+// insertStatement := internal.BlockInsertStatement(checked)
+// var id uint64
+// err := db.QueryRow(insertStatement,
+// dbBlock.Hash, dbBlock.Height, dbBlock.Size, isValid, isMainchain,
+// int32(dbBlock.Version), dbBlock.NumTx, dbBlock.NumRegTx,
+// dbtypes.ChainHashArray2(dbBlock.Tx), dbtypes.UInt64Array(dbBlock.TxDbIDs),
+// dbBlock.NumStakeTx, dbtypes.ChainHashArray2(dbBlock.STx), dbtypes.UInt64Array(dbBlock.STxDbIDs),
+// dbBlock.Time, int64(dbBlock.Nonce), int16(dbBlock.VoteBits), dbBlock.Voters,
+// dbBlock.FreshStake, dbBlock.Revocations, dbBlock.PoolSize, int64(dbBlock.Bits),
+// int64(dbBlock.SBits), dbBlock.Difficulty, int32(dbBlock.StakeVersion),
+// dbBlock.PreviousHash, dbBlock.ChainWork, dbtypes.ChainHashArray2(dbBlock.Winners)).Scan(&id)
+// return id, err
+// }
+
+// insertBlockPrevNext inserts a new row of the block_chain table.
+func insertBlockPrevNext(db *sql.DB, blockDbID uint64,
+ hash, prev, next *dbtypes.ChainHash) error {
rows, err := db.Query(internal.InsertBlockPrevNext, blockDbID, prev, hash, next)
if err == nil {
return rows.Close()
@@ -3644,24 +3640,25 @@ func InsertBlockPrevNext(db *sql.DB, blockDbID uint64,
return err
}
-// InsertBlockStats inserts the block stats into the stats table.
-func InsertBlockStats(db *sql.DB, blockDbID uint64, tpi *apitypes.TicketPoolInfo) error {
+// insertBlockStats inserts the block stats into the stats table.
+func insertBlockStats(db *sql.DB, blockDbID uint64, tpi *apitypes.TicketPoolInfo) error {
_, err := db.Exec(internal.UpsertStats, blockDbID, tpi.Height, tpi.Size, int64(tpi.Value*dcrToAtoms))
return err
}
-// RetrieveBestBlockHeight gets the best block height and hash (main chain
+// retrieveBestBlockHeight gets the best block height and hash (main chain
// only). Be sure to check for sql.ErrNoRows.
-func RetrieveBestBlockHeight(ctx context.Context, db *sql.DB) (height uint64, hash string, id uint64, err error) {
+func retrieveBestBlockHeight(ctx context.Context, db *sql.DB) (height uint64, hash dbtypes.ChainHash, err error) {
+ var id uint64 // maybe remove from query
err = db.QueryRowContext(ctx, internal.RetrieveBestBlockHeight).Scan(&id, &hash, &height)
return
}
-// RetrieveBestBlock gets the best block height and hash (main chain only). If
+// retrieveBestBlock gets the best block height and hash (main chain only). If
// there are no results from the query, the height is -1 and err is nil.
-func RetrieveBestBlock(ctx context.Context, db *sql.DB) (height int64, hash string, err error) {
+func retrieveBestBlock(ctx context.Context, db *sql.DB) (height int64, hash dbtypes.ChainHash, err error) {
var bbHeight uint64
- bbHeight, hash, _, err = RetrieveBestBlockHeight(ctx, db)
+ bbHeight, hash, err = retrieveBestBlockHeight(ctx, db)
height = int64(bbHeight)
if err != nil && err == sql.ErrNoRows {
height = -1
@@ -3670,43 +3667,44 @@ func RetrieveBestBlock(ctx context.Context, db *sql.DB) (height int64, hash stri
return
}
-// RetrieveBestBlockHeightAny gets the best block height, including side chains.
-func RetrieveBestBlockHeightAny(ctx context.Context, db *sql.DB) (height uint64, hash string, id uint64, err error) {
- err = db.QueryRowContext(ctx, internal.RetrieveBestBlockHeightAny).Scan(&id, &hash, &height)
- return
-}
+// retrieveBestBlockHeightAny gets the best block height, including side chains.
+// func retrieveBestBlockHeightAny(ctx context.Context, db *sql.DB) (height uint64, hash string, id uint64, err error) {
+// err = db.QueryRowContext(ctx, internal.RetrieveBestBlockHeightAny).Scan(&id, &hash, &height)
+// return
+// }
-// RetrieveBlockHash retrieves the hash of the block at the given height, if it
+// retrieveBlockHash retrieves the hash of the block at the given height, if it
// exists (be sure to check error against sql.ErrNoRows!). WARNING: this returns
// the most recently added block at this height, but there may be others.
-func RetrieveBlockHash(ctx context.Context, db *sql.DB, idx int64) (hash string, err error) {
+func retrieveBlockHash(ctx context.Context, db *sql.DB, idx int64) (hash dbtypes.ChainHash, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockHashByHeight, idx).Scan(&hash)
return
}
-// RetrieveBlockTimeByHeight retrieves time hash of the main chain block at the
+// retrieveBlockTimeByHeight retrieves the time of the main chain block at the
// given height, if it exists (be sure to check error against sql.ErrNoRows!).
-func RetrieveBlockTimeByHeight(ctx context.Context, db *sql.DB, idx int64) (time dbtypes.TimeDef, err error) {
+func retrieveBlockTimeByHeight(ctx context.Context, db *sql.DB, idx int64) (time dbtypes.TimeDef, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockTimeByHeight, idx).Scan(&time)
return
}
-// RetrieveBlockHeight retrieves the height of the block with the given hash, if
+// retrieveBlockHeight retrieves the height of the block with the given hash, if
// it exists (be sure to check error against sql.ErrNoRows!).
-func RetrieveBlockHeight(ctx context.Context, db *sql.DB, hash string) (height int64, err error) {
+func retrieveBlockHeight(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (height int64, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockHeightByHash, hash).Scan(&height)
return
}
-// RetrieveBlockVoteCount gets the number of votes mined in a block.
-func RetrieveBlockVoteCount(ctx context.Context, db *sql.DB, hash string) (numVotes int16, err error) {
+// retrieveBlockVoteCount gets the number of votes mined in a block.
+func retrieveBlockVoteCount(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (numVotes int16, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockVoteCount, hash).Scan(&numVotes)
return
}
-// RetrieveBlocksHashesAll retrieve the hash of every block in the blocks table,
+// retrieveBlocksHashesAll retrieve the hash of every block in the blocks table,
// ordered by their row ID.
-func RetrieveBlocksHashesAll(ctx context.Context, db *sql.DB) ([]string, error) {
+/*
+func retrieveBlocksHashesAll(ctx context.Context, db *sql.DB) ([]string, error) {
var hashes []string
rows, err := db.QueryContext(ctx, internal.SelectBlocksHashes)
if err != nil {
@@ -3727,18 +3725,11 @@ func RetrieveBlocksHashesAll(ctx context.Context, db *sql.DB) ([]string, error)
return hashes, err
}
+*/
-// RetrieveBlockChainDbID retrieves the row id in the block_chain table of the
-// block with the given hash, if it exists (be sure to check error against
-// sql.ErrNoRows!).
-func RetrieveBlockChainDbID(ctx context.Context, db *sql.DB, hash string) (dbID uint64, err error) {
- err = db.QueryRowContext(ctx, internal.SelectBlockChainRowIDByHash, hash).Scan(&dbID)
- return
-}
-
-// RetrieveSideChainBlocks retrieves the block chain status for all known side
+// retrieveSideChainBlocks retrieves the block chain status for all known side
// chain blocks.
-func RetrieveSideChainBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
+func retrieveSideChainBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectSideChainBlocks)
if err != nil {
@@ -3760,9 +3751,10 @@ func RetrieveSideChainBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtypes
return
}
-// RetrieveSideChainTips retrieves the block chain status for all known side
+// retrieveSideChainTips retrieves the block chain status for all known side
// chain tip blocks.
-func RetrieveSideChainTips(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
+/*
+func retrieveSideChainTips(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectSideChainTips)
if err != nil {
@@ -3784,10 +3776,11 @@ func RetrieveSideChainTips(ctx context.Context, db *sql.DB) (blocks []*dbtypes.B
return
}
+*/
-// RetrieveDisapprovedBlocks retrieves the block chain status for all blocks
+// retrieveDisapprovedBlocks retrieves the block chain status for all blocks
// that had their regular transactions invalidated by stakeholder disapproval.
-func RetrieveDisapprovedBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
+func retrieveDisapprovedBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtypes.BlockStatus, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectDisapprovedBlocks)
if err != nil {
@@ -3809,17 +3802,17 @@ func RetrieveDisapprovedBlocks(ctx context.Context, db *sql.DB) (blocks []*dbtyp
return
}
-// RetrieveBlockStatus retrieves the block chain status for the block with the
+// retrieveBlockStatus retrieves the block chain status for the block with the
// specified hash.
-func RetrieveBlockStatus(ctx context.Context, db *sql.DB, hash string) (bs dbtypes.BlockStatus, err error) {
+func retrieveBlockStatus(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (bs dbtypes.BlockStatus, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockStatus, hash).Scan(&bs.IsValid,
&bs.IsMainchain, &bs.Height, &bs.PrevHash, &bs.Hash, &bs.NextHash)
return
}
-// RetrieveBlockStatuses retrieves the block chain statuses of all blocks at
+// retrieveBlockStatuses retrieves the block chain statuses of all blocks at
// the given height.
-func RetrieveBlockStatuses(ctx context.Context, db *sql.DB, idx int64) (blocks []*dbtypes.BlockStatus, err error) {
+func retrieveBlockStatuses(ctx context.Context, db *sql.DB, idx int64) (blocks []*dbtypes.BlockStatus, err error) {
var rows *sql.Rows
rows, err = db.QueryContext(ctx, internal.SelectBlockStatuses, idx)
if err != nil {
@@ -3841,17 +3834,17 @@ func RetrieveBlockStatuses(ctx context.Context, db *sql.DB, idx int64) (blocks [
return
}
-// RetrieveBlockFlags retrieves the block's is_valid and is_mainchain flags.
-func RetrieveBlockFlags(ctx context.Context, db *sql.DB, hash string) (isValid bool, isMainchain bool, err error) {
+// retrieveBlockFlags retrieves the block's is_valid and is_mainchain flags.
+func retrieveBlockFlags(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (isValid bool, isMainchain bool, err error) {
err = db.QueryRowContext(ctx, internal.SelectBlockFlags, hash).Scan(&isValid, &isMainchain)
return
}
-// RetrieveBlockSummaryByTimeRange retrieves the slice of block summaries for
+// retrieveBlockSummaryByTimeRange retrieves the slice of block summaries for
// the given time range. The limit specifies the number of most recent block
// summaries to return. A limit of 0 indicates all blocks in the time range
// should be included.
-func RetrieveBlockSummaryByTimeRange(ctx context.Context, db *sql.DB, minTime, maxTime int64, limit int) ([]dbtypes.BlockDataBasic, error) {
+func retrieveBlockSummaryByTimeRange(ctx context.Context, db *sql.DB, minTime, maxTime int64, limit int) ([]dbtypes.BlockDataBasic, error) {
var blocks []dbtypes.BlockDataBasic
var stmt *sql.Stmt
var rows *sql.Rows
@@ -3900,16 +3893,16 @@ func RetrieveBlockSummaryByTimeRange(ctx context.Context, db *sql.DB, minTime, m
return blocks, nil
}
-// RetrievePreviousHashByBlockHash retrieves the previous block hash for the
+// retrievePreviousHashByBlockHash retrieves the previous block hash for the
// given block from the blocks table.
-func RetrievePreviousHashByBlockHash(ctx context.Context, db *sql.DB, hash string) (previousHash string, err error) {
- err = db.QueryRowContext(ctx, internal.SelectBlocksPreviousHash, hash).Scan(&previousHash)
- return
-}
+// func retrievePreviousHashByBlockHash(ctx context.Context, db *sql.DB, hash string) (previousHash string, err error) {
+// err = db.QueryRowContext(ctx, internal.SelectBlocksPreviousHash, hash).Scan(&previousHash)
+// return
+// }
-// SetMainchainByBlockHash is used to set the is_mainchain flag for the given
+// setMainchainByBlockHash is used to set the is_mainchain flag for the given
// block. This is required to handle a reorganization.
-func SetMainchainByBlockHash(db *sql.DB, hash string, isMainchain bool) (previousHash string, err error) {
+func setMainchainByBlockHash(db *sql.DB, hash dbtypes.ChainHash, isMainchain bool) (previousHash dbtypes.ChainHash, err error) {
err = db.QueryRow(internal.UpdateBlockMainchain, hash, isMainchain).Scan(&previousHash)
return
}
@@ -3918,7 +3911,7 @@ func SetMainchainByBlockHash(db *sql.DB, hash string, isMainchain bool) (previou
// UpdateTransactionsMainchain sets the is_mainchain column for the transactions
// in the specified block.
-func UpdateTransactionsMainchain(db *sql.DB, blockHash string, isMainchain bool) (int64, []uint64, error) {
+func updateTransactionsMainchain(db *sql.DB, blockHash dbtypes.ChainHash, isMainchain bool) (int64, []uint64, error) {
rows, err := db.Query(internal.UpdateTxnsMainchainByBlock, isMainchain, blockHash)
if err != nil {
return 0, nil, fmt.Errorf("failed to update transactions is_mainchain: %w", err)
@@ -3942,9 +3935,9 @@ func UpdateTransactionsMainchain(db *sql.DB, blockHash string, isMainchain bool)
return numRows, txRowIDs, err
}
-// UpdateTransactionsValid sets the is_valid column of the transactions table
+// updateTransactionsValid sets the is_valid column of the transactions table
// for the regular (non-stake) transactions in the specified block.
-func UpdateTransactionsValid(db *sql.DB, blockHash string, isValid bool) (int64, []uint64, error) {
+func updateTransactionsValid(db *sql.DB, blockHash dbtypes.ChainHash, isValid bool) (int64, []uint64, error) {
rows, err := db.Query(internal.UpdateRegularTxnsValidByBlock, isValid, blockHash)
if err != nil {
return 0, nil, fmt.Errorf("failed to update regular transactions is_valid: %w", err)
@@ -3968,9 +3961,9 @@ func UpdateTransactionsValid(db *sql.DB, blockHash string, isValid bool) (int64,
return numRows, txRowIDs, err
}
-// UpdateVotesMainchain sets the is_mainchain column for the votes in the
+// updateVotesMainchain sets the is_mainchain column for the votes in the
// specified block.
-func UpdateVotesMainchain(db SqlExecutor, blockHash string, isMainchain bool) (int64, error) {
+func updateVotesMainchain(db SqlExecutor, blockHash dbtypes.ChainHash, isMainchain bool) (int64, error) {
numRows, err := sqlExec(db, internal.UpdateVotesMainchainByBlock,
"failed to update votes is_mainchain: ", isMainchain, blockHash)
if err != nil {
@@ -3979,9 +3972,9 @@ func UpdateVotesMainchain(db SqlExecutor, blockHash string, isMainchain bool) (i
return numRows, nil
}
-// UpdateTicketsMainchain sets the is_mainchain column for the tickets in the
+// updateTicketsMainchain sets the is_mainchain column for the tickets in the
// specified block.
-func UpdateTicketsMainchain(db SqlExecutor, blockHash string, isMainchain bool) (int64, error) {
+func updateTicketsMainchain(db SqlExecutor, blockHash dbtypes.ChainHash, isMainchain bool) (int64, error) {
numRows, err := sqlExec(db, internal.UpdateTicketsMainchainByBlock,
"failed to update tickets is_mainchain: ", isMainchain, blockHash)
if err != nil {
@@ -3990,9 +3983,9 @@ func UpdateTicketsMainchain(db SqlExecutor, blockHash string, isMainchain bool)
return numRows, nil
}
-// UpdateTreasuryMainchain sets the is_mainchain column for the entires in the
+// updateTreasuryMainchain sets the is_mainchain column for the entries in the
// specified block.
-func UpdateTreasuryMainchain(db SqlExecutor, blockHash string, isMainchain bool) (int64, error) {
+func updateTreasuryMainchain(db SqlExecutor, blockHash dbtypes.ChainHash, isMainchain bool) (int64, error) {
numRows, err := sqlExec(db, internal.UpdateTreasuryMainchainByBlock,
"failed to update treasury txns is_mainchain: ", isMainchain, blockHash)
if err != nil {
@@ -4009,6 +4002,10 @@ func binnedTreasuryIO(ctx context.Context, db *sql.DB, timeInterval string) (*db
return parseRowsSentReceived(rows)
}
+func toCoin[T int64 | uint64](amt T) float64 {
+ return float64(amt) / 1e8
+}
+
func parseRowsSentReceived(rows *sql.Rows) (*dbtypes.ChartsData, error) {
defer closeRows(rows)
var items = new(dbtypes.ChartsData)
@@ -4021,13 +4018,13 @@ func parseRowsSentReceived(rows *sql.Rows) (*dbtypes.ChartsData, error) {
}
items.Time = append(items.Time, dbtypes.NewTimeDef(blockTime))
- items.Received = append(items.Received, dcrutil.Amount(received).ToCoin())
- items.Sent = append(items.Sent, dcrutil.Amount(sent).ToCoin())
+ items.Received = append(items.Received, toCoin(received))
+ items.Sent = append(items.Sent, toCoin(sent))
// Net represents the difference between the received and sent amount for a
// given block. If the difference is positive then the value is unspent amount
// otherwise if the value is zero then all amount is spent and if the net amount
// is negative then for the given block more amount was sent than received.
- items.Net = append(items.Net, dcrutil.Amount(received-sent).ToCoin())
+ items.Net = append(items.Net, toCoin(received-sent))
}
if err := rows.Err(); err != nil {
return nil, err
@@ -4036,9 +4033,9 @@ func parseRowsSentReceived(rows *sql.Rows) (*dbtypes.ChartsData, error) {
return items, nil
}
-// UpdateAddressesMainchainByIDs sets the valid_mainchain column for the
+// updateAddressesMainchainByIDs sets the valid_mainchain column for the
// addresses specified by their vin (spending) or vout (funding) row IDs.
-func UpdateAddressesMainchainByIDs(db SqlExecQueryer, vinsBlk, voutsBlk []dbtypes.UInt64Array,
+func updateAddressesMainchainByIDs(db SqlExecQueryer, vinsBlk, voutsBlk []dbtypes.UInt64Array,
isValidMainchain bool) (addresses []string, numSpendingRows, numFundingRows int64, err error) {
addrs := make(map[string]struct{}, len(vinsBlk)+len(voutsBlk)) // may be over-alloc
// Spending/vins: Set valid_mainchain for the is_funding=false addresses
@@ -4086,9 +4083,9 @@ func UpdateAddressesMainchainByIDs(db SqlExecQueryer, vinsBlk, voutsBlk []dbtype
return
}
-// UpdateLastBlockValid updates the is_valid column of the block specified by
+// updateLastBlockValid updates the is_valid column of the block specified by
// the row id for the blocks table.
-func UpdateLastBlockValid(db SqlExecutor, blockDbID uint64, isValid bool) error {
+func updateLastBlockValid(db SqlExecutor, blockDbID uint64, isValid bool) error {
numRows, err := sqlExec(db, internal.UpdateLastBlockValid,
"failed to update last block validity: ", blockDbID, isValid)
if err != nil {
@@ -4101,7 +4098,7 @@ func UpdateLastBlockValid(db SqlExecutor, blockDbID uint64, isValid bool) error
return nil
}
-func clearVoutRegularSpendTxRowIDs(db SqlExecutor, invalidatedBlockHash string) (int64, error) {
+func clearVoutRegularSpendTxRowIDs(db SqlExecutor, invalidatedBlockHash dbtypes.ChainHash) (int64, error) {
return sqlExec(db, `UPDATE vouts SET spend_tx_row_id = NULL
FROM transactions
WHERE transactions.tree=0
@@ -4111,7 +4108,7 @@ func clearVoutRegularSpendTxRowIDs(db SqlExecutor, invalidatedBlockHash string)
invalidatedBlockHash)
}
-func clearVoutAllSpendTxRowIDs(db SqlExecutor, transactionsBlockHash string) (int64, error) {
+func clearVoutAllSpendTxRowIDs(db SqlExecutor, transactionsBlockHash dbtypes.ChainHash) (int64, error) {
return sqlExec(db, `UPDATE vouts SET spend_tx_row_id = NULL
FROM transactions
WHERE transactions.block_hash=$1
@@ -4120,14 +4117,14 @@ func clearVoutAllSpendTxRowIDs(db SqlExecutor, transactionsBlockHash string) (in
transactionsBlockHash)
}
-// UpdateLastVins updates the is_valid and is_mainchain columns in the vins
+// updateLastVins updates the is_valid and is_mainchain columns in the vins
// table for all of the transactions in the block specified by the given block
// hash.
-func UpdateLastVins(db *sql.DB, blockHash string, isValid, isMainchain bool) error {
- // Retrieve the hash for every transaction in this block. A context with no
+func updateLastVins(db *sql.DB, blockHash dbtypes.ChainHash, isValid, isMainchain bool) error {
+ // Retrieve the hash for every transaction in this block. A context with no
// deadline or cancellation function is used since this UpdateLastVins needs
// to complete to ensure DB integrity.
- _, txs, _, trees, timestamps, err := RetrieveTxsByBlockHash(context.Background(), db, blockHash)
+ txs, _, trees, timestamps, err := retrieveTxsByBlockHash(context.Background(), db, blockHash)
if err != nil {
return err
}
@@ -4149,23 +4146,23 @@ func UpdateLastVins(db *sql.DB, blockHash string, isValid, isMainchain bool) err
return nil
}
-// UpdateLastAddressesValid sets valid_mainchain as specified by isValid for
+// updateLastAddressesValid sets valid_mainchain as specified by isValid for
// addresses table rows pertaining to regular (non-stake) transactions found in
// the given block.
-func UpdateLastAddressesValid(db *sql.DB, blockHash string, isValid bool) ([]string, error) {
+func updateLastAddressesValid(db *sql.DB, blockHash dbtypes.ChainHash, isValid bool) ([]string, error) {
// The queries in this function should not timeout or (probably) canceled,
// so use a background context.
ctx := context.Background()
// Get the row ids of all vins and vouts of regular txns in this block.
onlyRegularTxns := true
- vinDbIDsBlk, voutDbIDsBlk, _, err := RetrieveTxnsVinsVoutsByBlock(ctx, db, blockHash, onlyRegularTxns)
+ vinDbIDsBlk, voutDbIDsBlk, _, err := retrieveTxnsVinsVoutsByBlock(ctx, db, blockHash, onlyRegularTxns)
if err != nil {
return nil, fmt.Errorf("unable to retrieve vin data for block %s: %w", blockHash, err)
}
// Using vins and vouts row ids, update the valid_mainchain column of the
// rows of the address table referring to these vins and vouts.
- addresses, numAddrSpending, numAddrFunding, err := UpdateAddressesMainchainByIDs(db,
+ addresses, numAddrSpending, numAddrFunding, err := updateAddressesMainchainByIDs(db,
vinDbIDsBlk, voutDbIDsBlk, isValid)
if err != nil {
log.Errorf("Failed to set addresses rows in block %s as sidechain: %w", blockHash, err)
@@ -4175,9 +4172,9 @@ func UpdateLastAddressesValid(db *sql.DB, blockHash string, isValid bool) ([]str
return addresses, err
}
-// UpdateBlockNext sets the next block's hash for the specified row of the
+// updateBlockNext sets the next block's hash for the specified row of the
// block_chain table specified by DB row ID.
-func UpdateBlockNext(db SqlExecutor, blockDbID uint64, next string) error {
+func updateBlockNext(db SqlExecutor, blockDbID uint64, next dbtypes.ChainHash) error {
res, err := db.Exec(internal.UpdateBlockNext, blockDbID, next)
if err != nil {
return err
@@ -4192,9 +4189,9 @@ func UpdateBlockNext(db SqlExecutor, blockDbID uint64, next string) error {
return nil
}
-// UpdateBlockNextByHash sets the next block's hash for the block in the
+// updateBlockNextByHash sets the next block's hash for the block in the
// block_chain table specified by hash.
-func UpdateBlockNextByHash(db SqlExecutor, this, next string) error {
+func updateBlockNextByHash(db SqlExecutor, this, next dbtypes.ChainHash) error {
res, err := db.Exec(internal.UpdateBlockNextByHash, this, next)
if err != nil {
return err
@@ -4209,9 +4206,9 @@ func UpdateBlockNextByHash(db SqlExecutor, this, next string) error {
return nil
}
-// UpdateBlockNextByNextHash sets the next block's hash for the block in the
+// updateBlockNextByNextHash sets the next block's hash for the block in the
// block_chain table with a current next_hash specified by hash.
-func UpdateBlockNextByNextHash(db SqlExecutor, currentNext, newNext string) error {
+func updateBlockNextByNextHash(db SqlExecutor, currentNext, newNext dbtypes.ChainHash) error {
res, err := db.Exec(internal.UpdateBlockNextByNextHash, currentNext, newNext)
if err != nil {
return err
@@ -4226,41 +4223,31 @@ func UpdateBlockNextByNextHash(db SqlExecutor, currentNext, newNext string) erro
return nil
}
-// RetrievePoolInfo returns ticket pool info for block height ind
-func RetrievePoolInfo(ctx context.Context, db *sql.DB, ind int64) (*apitypes.TicketPoolInfo, error) {
+// retrievePoolInfo returns ticket pool info for block height ind
+func retrievePoolInfo(ctx context.Context, db *sql.DB, ind int64) (*apitypes.TicketPoolInfo, error) {
tpi := &apitypes.TicketPoolInfo{
Height: uint32(ind),
}
- var hash string
- var winners []string
+ var hash dbtypes.ChainHash // unused result; consider dropping from the query
+ var winners dbtypes.ChainHashArray
var val int64
err := db.QueryRowContext(ctx, internal.SelectPoolInfoByHeight, ind).Scan(&hash, &tpi.Size,
- &val, pq.Array(&winners))
- tpi.Value = dcrutil.Amount(val).ToCoin()
- tpi.ValAvg = tpi.Value / float64(tpi.Size)
- tpi.Winners = winners
- return tpi, err
-}
-
-// RetrievePoolInfoByHash returns ticket pool info for blockhash hash.
-func RetrievePoolInfoByHash(ctx context.Context, db *sql.DB, hash string) (*apitypes.TicketPoolInfo, error) {
- tpi := new(apitypes.TicketPoolInfo)
- var winners []string
- var val int64
- err := db.QueryRowContext(ctx, internal.SelectPoolInfoByHash, hash).Scan(&tpi.Height, &tpi.Size,
- &val, pq.Array(&winners))
- tpi.Value = dcrutil.Amount(val).ToCoin()
+ &val, &winners)
+ tpi.Value = toCoin(val)
tpi.ValAvg = tpi.Value / float64(tpi.Size)
- tpi.Winners = winners
+ tpi.Winners = make([]string, len(winners))
+ for i := range winners {
+ tpi.Winners[i] = winners[i].String()
+ }
return tpi, err
}
-// RetrievePoolInfoRange returns an array of apitypes.TicketPoolInfo for block
+// retrievePoolInfoRange returns an array of apitypes.TicketPoolInfo for block
// range ind0 to ind1 and a non-nil error on success
-func RetrievePoolInfoRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]apitypes.TicketPoolInfo, []string, error) {
+func retrievePoolInfoRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]apitypes.TicketPoolInfo, []dbtypes.ChainHash, error) {
N := ind1 - ind0 + 1
if N == 0 {
- return []apitypes.TicketPoolInfo{}, []string{}, nil
+ return []apitypes.TicketPoolInfo{}, []dbtypes.ChainHash{}, nil
}
if N < 0 {
return nil, nil, fmt.Errorf("Cannot retrieve pool info range (%d>%d)",
@@ -4268,7 +4255,7 @@ func RetrievePoolInfoRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([
}
tpis := make([]apitypes.TicketPoolInfo, 0, N)
- hashes := make([]string, 0, N)
+ hashes := make([]dbtypes.ChainHash, 0, N)
stmt, err := db.PrepareContext(ctx, internal.SelectPoolInfoRange)
if err != nil {
@@ -4285,17 +4272,20 @@ func RetrievePoolInfoRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([
for rows.Next() {
var tpi apitypes.TicketPoolInfo
- var hash string
- var winners []string
+ var hash dbtypes.ChainHash
+ var winners dbtypes.ChainHashArray
var val int64
- if err = rows.Scan(&tpi.Height, &hash, &tpi.Size, &val,
- pq.Array(&winners)); err != nil {
+ if err = rows.Scan(&tpi.Height, &hash, &tpi.Size, &val, &winners); err != nil {
log.Errorf("Unable to scan for TicketPoolInfo fields: %v", err)
return nil, nil, err
}
- tpi.Value = dcrutil.Amount(val).ToCoin()
+ tpi.Value = toCoin(val)
tpi.ValAvg = tpi.Value / float64(tpi.Size)
- tpi.Winners = winners
+
+ tpi.Winners = make([]string, len(winners))
+ for i := range winners {
+ tpi.Winners[i] = winners[i].String()
+ }
tpis = append(tpis, tpi)
hashes = append(hashes, hash)
}
@@ -4306,9 +4296,9 @@ func RetrievePoolInfoRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([
return tpis, hashes, nil
}
-// RetrievePoolValAndSizeRange returns an array each of the pool values and
+// retrievePoolValAndSizeRange returns an array each of the pool values and
// sizes for block range ind0 to ind1.
-func RetrievePoolValAndSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]float64, []uint32, error) {
+func retrievePoolValAndSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]float64, []uint32, error) {
N := ind1 - ind0 + 1
if N == 0 {
return []float64{}, []uint32{}, nil
@@ -4341,7 +4331,7 @@ func RetrievePoolValAndSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int
log.Errorf("Unable to scan for TicketPoolInfo fields: %v", err)
return nil, nil, err
}
- poolvals = append(poolvals, dcrutil.Amount(pval).ToCoin())
+ poolvals = append(poolvals, toCoin(pval))
poolsizes = append(poolsizes, psize)
}
if err = rows.Err(); err != nil {
@@ -4349,61 +4339,70 @@ func RetrievePoolValAndSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int
}
if len(poolsizes) != int(N) {
- log.Warnf("RetrievePoolValAndSizeRange: Retrieved pool values (%d) not expected number (%d)",
+ log.Warnf("retrievePoolValAndSizeRange: retrieved pool values (%d) not expected number (%d)",
len(poolsizes), N)
}
return poolvals, poolsizes, nil
}
-// RetrieveBlockSummary fetches basic block data for block ind.
-func RetrieveBlockSummary(ctx context.Context, db *sql.DB, ind int64) (*apitypes.BlockDataBasic, error) {
+// retrieveBlockSummary fetches basic block data for block ind.
+func retrieveBlockSummary(ctx context.Context, db *sql.DB, ind int64) (*apitypes.BlockDataBasic, error) {
bd := apitypes.NewBlockDataBasic()
- var winners []string
+ var winners dbtypes.ChainHashArray
var isValid bool
var val, sbits int64
+ var hash dbtypes.ChainHash
var timestamp dbtypes.TimeDef
err := db.QueryRowContext(ctx, internal.SelectBlockDataByHeight, ind).Scan(
- &bd.Hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
- &bd.PoolInfo.Size, &val, pq.Array(&winners), &isValid)
+ &hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
+ &bd.PoolInfo.Size, &val, &winners, &isValid)
if err != nil {
return nil, err
}
- bd.PoolInfo.Value = dcrutil.Amount(val).ToCoin()
+ bd.Hash = hash.String()
+ bd.PoolInfo.Value = toCoin(val)
bd.PoolInfo.ValAvg = bd.PoolInfo.Value / float64(bd.Size)
bd.Time = apitypes.TimeAPI{S: timestamp}
- bd.PoolInfo.Winners = winners
- bd.StakeDiff = dcrutil.Amount(sbits).ToCoin()
+ bd.PoolInfo.Winners = make([]string, len(winners))
+ for i := range winners {
+ bd.PoolInfo.Winners[i] = winners[i].String()
+ }
+ bd.StakeDiff = toCoin(sbits)
return bd, nil
}
-// RetrieveBlockSummaryByHash fetches basic block data for block hash.
-func RetrieveBlockSummaryByHash(ctx context.Context, db *sql.DB, hash string) (*apitypes.BlockDataBasic, error) {
+// retrieveBlockSummaryByHash fetches basic block data for block hash.
+func retrieveBlockSummaryByHash(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (*apitypes.BlockDataBasic, error) {
bd := apitypes.NewBlockDataBasic()
- var winners []string
+ var winners dbtypes.ChainHashArray
var isMainchain, isValid bool
var timestamp dbtypes.TimeDef
var val, psize sql.NullInt64 // pool value and size are only stored for mainchain blocks
var sbits int64
err := db.QueryRowContext(ctx, internal.SelectBlockDataByHash, hash).Scan(
- &bd.Hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
- &psize, &val, pq.Array(&winners), &isMainchain, &isValid)
+ &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
+ &psize, &val, &winners, &isMainchain, &isValid)
if err != nil {
return nil, err
}
- bd.PoolInfo.Value = dcrutil.Amount(val.Int64).ToCoin()
+ bd.Hash = hash.String()
+ bd.PoolInfo.Value = toCoin(val.Int64)
bd.PoolInfo.Size = uint32(psize.Int64)
bd.PoolInfo.ValAvg = bd.PoolInfo.Value / float64(bd.Size)
bd.Time = apitypes.TimeAPI{S: timestamp}
- bd.PoolInfo.Winners = winners
- bd.StakeDiff = dcrutil.Amount(sbits).ToCoin()
+ bd.PoolInfo.Winners = make([]string, len(winners))
+ for i := range winners {
+ bd.PoolInfo.Winners[i] = winners[i].String()
+ }
+ bd.StakeDiff = toCoin(sbits)
return bd, nil
}
-// RetrieveBlockSummaryRange fetches basic block data for the blocks in range
+// retrieveBlockSummaryRange fetches basic block data for the blocks in range
// (ind0, ind1).
-func RetrieveBlockSummaryRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]*apitypes.BlockDataBasic, error) {
+func retrieveBlockSummaryRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]*apitypes.BlockDataBasic, error) {
var desc bool
low, high := ind0, ind1
if low > high {
@@ -4426,22 +4425,27 @@ func RetrieveBlockSummaryRange(ctx context.Context, db *sql.DB, ind0, ind1 int64
defer rows.Close()
for rows.Next() {
bd := apitypes.NewBlockDataBasic()
- var winners []string
+ var winners dbtypes.ChainHashArray
var isValid bool
var val, sbits int64
var timestamp dbtypes.TimeDef
+ var hash dbtypes.ChainHash
err := rows.Scan(
- &bd.Hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
- &bd.PoolInfo.Size, &val, pq.Array(&winners), &isValid,
+ &hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
+ &bd.PoolInfo.Size, &val, &winners, &isValid,
)
if err != nil {
return nil, err
}
- bd.PoolInfo.Value = dcrutil.Amount(val).ToCoin()
+ bd.Hash = hash.String()
+ bd.PoolInfo.Value = toCoin(val)
bd.PoolInfo.ValAvg = bd.PoolInfo.Value / float64(bd.Size)
bd.Time = apitypes.TimeAPI{S: timestamp}
- bd.PoolInfo.Winners = winners
- bd.StakeDiff = dcrutil.Amount(sbits).ToCoin()
+ bd.PoolInfo.Winners = make([]string, len(winners))
+ for i := range winners {
+ bd.PoolInfo.Winners[i] = winners[i].String()
+ }
+ bd.StakeDiff = toCoin(sbits)
blocks = append(blocks, bd)
}
if err = rows.Err(); err != nil {
@@ -4452,9 +4456,9 @@ func RetrieveBlockSummaryRange(ctx context.Context, db *sql.DB, ind0, ind1 int64
return blocks, nil
}
-// RetrieveBlockSummaryRangeStepped fetches basic block data for every step'th
+// retrieveBlockSummaryRangeStepped fetches basic block data for every step'th
// block in range (ind0, ind1).
-func RetrieveBlockSummaryRangeStepped(ctx context.Context, db *sql.DB, ind0, ind1, step int64) ([]*apitypes.BlockDataBasic, error) {
+func retrieveBlockSummaryRangeStepped(ctx context.Context, db *sql.DB, ind0, ind1, step int64) ([]*apitypes.BlockDataBasic, error) {
var desc bool
stepMod := ind0 % step
low, high := ind0, ind1
@@ -4479,22 +4483,27 @@ func RetrieveBlockSummaryRangeStepped(ctx context.Context, db *sql.DB, ind0, ind
defer rows.Close()
for rows.Next() {
bd := apitypes.NewBlockDataBasic()
- var winners []string
+ var winners dbtypes.ChainHashArray
var isValid bool
var val, sbits int64
var timestamp dbtypes.TimeDef
+ var hash dbtypes.ChainHash
err := rows.Scan(
- &bd.Hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
- &bd.PoolInfo.Size, &val, pq.Array(&winners), &isValid,
+ &hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
+ &bd.PoolInfo.Size, &val, &winners, &isValid,
)
if err != nil {
return nil, err
}
- bd.PoolInfo.Value = dcrutil.Amount(val).ToCoin()
+ bd.Hash = hash.String()
+ bd.PoolInfo.Value = toCoin(val)
bd.PoolInfo.ValAvg = bd.PoolInfo.Value / float64(bd.Size)
bd.Time = apitypes.TimeAPI{S: timestamp}
- bd.PoolInfo.Winners = winners
- bd.StakeDiff = dcrutil.Amount(sbits).ToCoin()
+ bd.PoolInfo.Winners = make([]string, len(winners))
+ for i := range winners {
+ bd.PoolInfo.Winners[i] = winners[i].String()
+ }
+ bd.StakeDiff = toCoin(sbits)
blocks = append(blocks, bd)
}
if err = rows.Err(); err != nil {
@@ -4505,8 +4514,8 @@ func RetrieveBlockSummaryRangeStepped(ctx context.Context, db *sql.DB, ind0, ind
return blocks, nil
}
-// RetrieveBlockSize return the size of block at height ind.
-func RetrieveBlockSize(ctx context.Context, db *sql.DB, ind int64) (int32, error) {
+// retrieveBlockSize return the size of block at height ind.
+func retrieveBlockSize(ctx context.Context, db *sql.DB, ind int64) (int32, error) {
var blockSize int32
err := db.QueryRowContext(ctx, internal.SelectBlockSizeByHeight, ind).Scan(&blockSize)
if err != nil {
@@ -4516,8 +4525,8 @@ func RetrieveBlockSize(ctx context.Context, db *sql.DB, ind int64) (int32, error
return blockSize, nil
}
-// RetrieveBlockSizeRange returns an array of block sizes for block range ind0 to ind1
-func RetrieveBlockSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]int32, error) {
+// retrieveBlockSizeRange returns an array of block sizes for block range ind0 to ind1
+func retrieveBlockSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]int32, error) {
N := ind1 - ind0 + 1
if N == 0 {
return []int32{}, nil
@@ -4557,25 +4566,25 @@ func RetrieveBlockSizeRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) (
return blockSizes, nil
}
-// RetrieveSDiff returns the stake difficulty for block at the specified chain
+// retrieveSDiff returns the stake difficulty for block at the specified chain
// height.
-func RetrieveSDiff(ctx context.Context, db *sql.DB, ind int64) (float64, error) {
+func retrieveSDiff(ctx context.Context, db *sql.DB, ind int64) (float64, error) {
var sbits int64
err := db.QueryRowContext(ctx, internal.SelectSBitsByHeight, ind).Scan(&sbits)
- return dcrutil.Amount(sbits).ToCoin(), err
+ return toCoin(sbits), err
}
-// RetrieveSBitsByHash returns the stake difficulty in atoms for the specified
+// retrieveSBitsByHash returns the stake difficulty in atoms for the specified
// block.
-func RetrieveSBitsByHash(ctx context.Context, db *sql.DB, hash string) (int64, error) {
+func retrieveSBitsByHash(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash) (int64, error) {
var sbits int64
err := db.QueryRowContext(ctx, internal.SelectSBitsByHash, hash).Scan(&sbits)
return sbits, err
}
-// RetrieveSDiffRange returns an array of stake difficulties for block range
+// retrieveSDiffRange returns an array of stake difficulties for block range
// ind0 to ind1.
-func RetrieveSDiffRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]float64, error) {
+func retrieveSDiffRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]float64, error) {
N := ind1 - ind0 + 1
if N == 0 {
return []float64{}, nil
@@ -4605,7 +4614,7 @@ func RetrieveSDiffRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]fl
log.Errorf("Unable to scan for sdiff fields: %v", err)
return nil, err
}
- sdiffs = append(sdiffs, dcrutil.Amount(sbits).ToCoin())
+ sdiffs = append(sdiffs, toCoin(sbits))
}
if err = rows.Err(); err != nil {
return nil, err
@@ -4614,31 +4623,36 @@ func RetrieveSDiffRange(ctx context.Context, db *sql.DB, ind0, ind1 int64) ([]fl
return sdiffs, nil
}
-// RetrieveLatestBlockSummary returns the block summary for the best block.
-func RetrieveLatestBlockSummary(ctx context.Context, db *sql.DB) (*apitypes.BlockDataBasic, error) {
+// retrieveLatestBlockSummary returns the block summary for the best block.
+func retrieveLatestBlockSummary(ctx context.Context, db *sql.DB) (*apitypes.BlockDataBasic, error) {
bd := apitypes.NewBlockDataBasic()
- var winners []string
+ var winners dbtypes.ChainHashArray
var timestamp dbtypes.TimeDef
var isValid bool
var val, sbits int64
+ var hash dbtypes.ChainHash
err := db.QueryRowContext(ctx, internal.SelectBlockDataBest).Scan(
- &bd.Hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
- &bd.PoolInfo.Size, &val, pq.Array(&winners), &isValid)
+ &hash, &bd.Height, &bd.Size, &bd.Difficulty, &sbits, ×tamp,
+ &bd.PoolInfo.Size, &val, &winners, &isValid)
if err != nil {
return nil, err
}
- bd.PoolInfo.Value = dcrutil.Amount(val).ToCoin()
+ bd.Hash = hash.String()
+ bd.PoolInfo.Value = toCoin(val)
bd.PoolInfo.ValAvg = bd.PoolInfo.Value / float64(bd.PoolInfo.Size)
bd.Time = apitypes.TimeAPI{S: timestamp}
- bd.PoolInfo.Winners = winners
- bd.StakeDiff = dcrutil.Amount(sbits).ToCoin()
+ bd.PoolInfo.Winners = make([]string, len(winners))
+ for i := range winners {
+ bd.PoolInfo.Winners[i] = winners[i].String()
+ }
+ bd.StakeDiff = toCoin(sbits)
return bd, nil
}
-// RetrieveDiff returns the difficulty for the first block mined after the
+// retrieveDiff returns the difficulty for the first block mined after the
// provided UNIX timestamp.
-func RetrieveDiff(ctx context.Context, db *sql.DB, timestamp int64) (float64, error) {
+func retrieveDiff(ctx context.Context, db *sql.DB, timestamp int64) (float64, error) {
var diff float64
tDef := dbtypes.NewTimeDefFromUNIX(timestamp)
err := db.QueryRowContext(ctx, internal.SelectDiffByTime, tDef).Scan(&diff)
diff --git a/db/dcrpg/rewind.go b/db/dcrpg/rewind.go
index ede7310ac..1b76af95a 100644
--- a/db/dcrpg/rewind.go
+++ b/db/dcrpg/rewind.go
@@ -47,19 +47,19 @@ import (
"github.com/decred/dcrdata/v8/db/dbtypes"
)
-func deleteMissesForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteMissesForBlock(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteMisses, "failed to delete misses", hash)
}
-func deleteVotesForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteVotesForBlock(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteVotes, "failed to delete votes", hash)
}
-func deleteTicketsForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteTicketsForBlock(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteTickets, "failed to delete tickets", hash)
}
-func deleteTreasuryTxnsForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteTreasuryTxnsForBlock(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteTreasuryTxns, "failed to delete treasury txns", hash)
}
@@ -67,7 +67,7 @@ func deleteSwapsForBlockHeight(dbTx SqlExecutor, height int64) (rowsDeleted int6
return sqlExec(dbTx, internal.DeleteSwaps, "failed to delete swaps", height)
}
-func deleteTransactionsForBlock(dbTx *sql.Tx, hash string) (txRowIds []int64, err error) {
+func deleteTransactionsForBlock(dbTx *sql.Tx, hash dbtypes.ChainHash) (txRowIds []int64, err error) {
var rows *sql.Rows
rows, err = dbTx.Query(internal.DeleteTransactionsSimple, hash)
if err != nil {
@@ -90,72 +90,52 @@ func deleteTransactionsForBlock(dbTx *sql.Tx, hash string) (txRowIds []int64, er
return
}
-func deleteVoutsForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
- return sqlExec(dbTx, internal.DeleteVouts, "failed to delete vouts", hash)
-}
+// func deleteVoutsForBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+// return sqlExec(dbTx, internal.DeleteVouts, "failed to delete vouts", hash)
+// }
-func deleteVoutsForBlockSubQry(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteVoutsForBlockSubQry(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteVoutsSubQry, "failed to delete vouts", hash)
}
-func deleteVinsForBlockSubQry(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteVinsForBlockSubQry(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteVinsSubQry, "failed to delete vins", hash)
}
-func deleteAddressesForBlockSubQry(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteAddressesForBlockSubQry(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteAddressesSubQry, "failed to delete addresses", hash)
}
-func deleteBlock(dbTx SqlExecutor, hash string) (rowsDeleted int64, err error) {
+func deleteBlock(dbTx SqlExecutor, hash dbtypes.ChainHash) (rowsDeleted int64, err error) {
return sqlExec(dbTx, internal.DeleteBlock, "failed to delete block", hash)
}
-func deleteBlockFromChain(dbTx *sql.Tx, hash string) (err error) {
+func deleteBlockFromChain(dbTx *sql.Tx, hash dbtypes.ChainHash) (err error) {
// Delete the row from block_chain where this_hash is the specified hash,
// returning the previous block hash in the chain.
- var prevHash string
+ var prevHash dbtypes.ChainHash
err = dbTx.QueryRow(internal.DeleteBlockFromChain, hash).Scan(&prevHash)
if err != nil {
// If a row with this_hash was not found, and thus prev_hash is not set,
// attempt to locate a row with next_hash set to the hash of this block,
- // and set it to the empty string.
+ // and set it to the empty hash.
if err == sql.ErrNoRows {
log.Warnf("Block %v not found in block_chain.this_hash column.", hash)
- err = UpdateBlockNextByNextHash(dbTx, hash, "")
+ err = updateBlockNextByNextHash(dbTx, hash, dbtypes.ChainHash{})
}
return
}
// For any row where next_hash is the prev_hash of the removed row, set
// next_hash to and empty string since that block is no longer in the chain.
- return UpdateBlockNextByHash(dbTx, prevHash, "")
-}
-
-// RetrieveTxsBlocksAboveHeight returns all distinct mainchain block heights and
-// hashes referenced in the transactions table above the given height.
-func RetrieveTxsBlocksAboveHeight(ctx context.Context, db *sql.DB, height int64) (heights []int64, hashes []string, err error) {
- var rows *sql.Rows
- rows, err = db.QueryContext(ctx, internal.SelectTxsBlocksAboveHeight, height)
- if err != nil {
- return
- }
-
- for rows.Next() {
- var height int64
- var hash string
- if err = rows.Scan(&height, &hash); err != nil {
- return nil, nil, err
- }
- heights = append(heights, height)
- hashes = append(hashes, hash)
- }
- return
+ return updateBlockNextByHash(dbTx, prevHash, dbtypes.ChainHash{})
}
-// RetrieveTxsBestBlockMainchain returns the best mainchain block's height from
+// retrieveTxsBestBlockMainchain returns the best mainchain block's height from
// the transactions table. If the table is empty, a height of -1, an empty hash
// string, and a nil error are returned
-func RetrieveTxsBestBlockMainchain(ctx context.Context, db *sql.DB) (height int64, hash string, err error) {
+/*
+func retrieveTxsBestBlockMainchain(ctx context.Context, db *sql.DB) (height int64, hash string, err error) {
err = db.QueryRowContext(ctx, internal.SelectTxsBestBlock).Scan(&height, &hash)
if err == sql.ErrNoRows {
err = nil
@@ -163,12 +143,13 @@ func RetrieveTxsBestBlockMainchain(ctx context.Context, db *sql.DB) (height int6
}
return
}
+*/
-// DeleteBlockData removes all data for the specified block from every table.
+// deleteBlockData removes all data for the specified block from every table.
// Data are removed from tables in the following order: vins, vouts, addresses,
// transactions, tickets, votes, misses, blocks, block_chain.
// WARNING: When no indexes are present, these queries are VERY SLOW.
-func DeleteBlockData(ctx context.Context, db *sql.DB, hash string, height int64) (res dbtypes.DeletionSummary, err error) {
+func deleteBlockData(ctx context.Context, db *sql.DB, hash dbtypes.ChainHash, height int64) (res dbtypes.DeletionSummary, err error) {
// The data purge is an all or nothing operation (no partial removal of
// data), so use a common sql.Tx for all deletions, and Commit in this
// function rather after each deletion.
@@ -309,48 +290,48 @@ func DeleteBlockData(ctx context.Context, db *sql.DB, hash string, height int64)
return
}
-// DeleteBestBlock removes all data for the best block in the DB from every
+// deleteBestBlock removes all data for the best block in the DB from every
// table via DeleteBlockData. The returned height and hash are for the best
// block after successful data removal, or the initial best block if removal
// fails as indicated by a non-nil error value.
-func DeleteBestBlock(ctx context.Context, db *sql.DB) (res dbtypes.DeletionSummary, height int64, hash string, err error) {
- height, hash, err = RetrieveBestBlock(ctx, db)
+func deleteBestBlock(ctx context.Context, db *sql.DB) (res dbtypes.DeletionSummary, height int64, hash dbtypes.ChainHash, err error) {
+ height, hash, err = retrieveBestBlock(ctx, db)
if err != nil {
return
}
- res, err = DeleteBlockData(ctx, db, hash, height)
+ res, err = deleteBlockData(ctx, db, hash, height)
if err != nil {
return
}
- height, hash, err = RetrieveBestBlock(ctx, db)
+ height, hash, err = retrieveBestBlock(ctx, db)
if err != nil {
return
}
- err = SetDBBestBlock(db, hash, height)
+ err = setDBBestBlock(db, hash, height)
return
}
-// DeleteBlocks removes all data for the N best blocks in the DB from every
+// deleteBlocks removes all data for the N best blocks in the DB from every
// table via repeated calls to DeleteBestBlock.
-func DeleteBlocks(ctx context.Context, N int64, db *sql.DB) (res []dbtypes.DeletionSummary, height int64, hash string, err error) {
+func deleteBlocks(ctx context.Context, N int64, db *sql.DB) (res []dbtypes.DeletionSummary, height int64, hash dbtypes.ChainHash, err error) {
// If N is less than 1, get the current best block height and hash, then
// return.
if N < 1 {
- height, hash, err = RetrieveBestBlock(ctx, db)
+ height, hash, err = retrieveBestBlock(ctx, db)
return
}
for i := int64(0); i < N; i++ {
var resi dbtypes.DeletionSummary
- resi, height, hash, err = DeleteBestBlock(ctx, db)
+ resi, height, hash, err = deleteBestBlock(ctx, db)
if err != nil {
return
}
res = append(res, resi)
- if hash == "" {
+ if hash.IsZero() {
break
}
if (i%100 == 0 && i > 0) || i == N-1 {
diff --git a/db/dcrpg/sync.go b/db/dcrpg/sync.go
index 76a0ab57a..a1ea258b7 100644
--- a/db/dcrpg/sync.go
+++ b/db/dcrpg/sync.go
@@ -57,7 +57,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
reindexing := newIndexes || lastBlock == -1
// See if initial sync (initial block download) was previously completed.
- ibdComplete, err := IBDComplete(pgb.db)
+ ibdComplete, err := ibdComplete(pgb.db)
if err != nil {
return lastBlock, fmt.Errorf("IBDComplete failed: %w", err)
}
@@ -105,7 +105,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
if pgb.utxoCache.Size() == 0 { // entries at any height implies it's warmed by previous sync
log.Infof("Collecting all UTXO data prior to height %d...", lastBlock+1)
- utxos, err := RetrieveUTXOs(ctx, pgb.db)
+ utxos, err := retrieveUTXOs(ctx, pgb.db)
if err != nil {
return -1, fmt.Errorf("RetrieveUTXOs: %w", err)
}
@@ -149,7 +149,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
// ibd_complete flag to false if it is not already false.
if ibdComplete && (reindexing || updateAllAddresses) {
// Set meta.ibd_complete = FALSE.
- if err = SetIBDComplete(pgb.db, false); err != nil {
+ if err = setIBDComplete(pgb.db, false); err != nil {
return nodeHeight, fmt.Errorf("failed to set meta.ibd_complete: %w", err)
}
}
@@ -397,7 +397,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
// invalidated and the transactions are subsequently re-mined in another
// block. Remove these before indexing.
log.Infof("Finding and removing duplicate table rows before indexing...")
- if err = pgb.DeleteDuplicates(barLoad); err != nil {
+ if err = pgb.deleteDuplicates(barLoad); err != nil {
return 0, err
}
@@ -460,7 +460,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
// Drop the index on addresses.matching_tx_hash if it exists.
_ = DeindexAddressTableOnMatchingTxHash(pgb.db) // ignore error if the index is absent
- numAddresses, err := pgb.UpdateSpendingInfoInAllAddresses(barLoad)
+ numAddresses, err := pgb.updateSpendingInfoInAllAddresses(barLoad)
if err != nil {
return nodeHeight, fmt.Errorf("UpdateSpendingInfoInAllAddresses FAILED: %w", err)
}
@@ -494,7 +494,7 @@ func (pgb *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.BlockFetche
}
// Set meta.ibd_complete = TRUE.
- if err = SetIBDComplete(pgb.db, true); err != nil {
+ if err = setIBDComplete(pgb.db, true); err != nil {
return nodeHeight, fmt.Errorf("failed to set meta.ibd_complete: %w", err)
}
diff --git a/db/dcrpg/system.go b/db/dcrpg/system.go
index dc71260ce..9f54c086c 100644
--- a/db/dcrpg/system.go
+++ b/db/dcrpg/system.go
@@ -141,8 +141,8 @@ func (pgs PGSettings) String() string {
return out
}
-// RetrievePGVersion retrieves the version of the connected PostgreSQL server.
-func RetrievePGVersion(db *sql.DB) (ver string, verNum uint32, err error) {
+// retrievePGVersion retrieves the version of the connected PostgreSQL server.
+func retrievePGVersion(db *sql.DB) (ver string, verNum uint32, err error) {
err = db.QueryRow(internal.RetrievePGVersion).Scan(&ver)
if err != nil {
return
@@ -201,27 +201,21 @@ func retrieveSysSettings(stmt string, db *sql.DB) (PGSettings, error) {
return settings, nil
}
-// RetrieveSysSettingsConfFile retrieves settings that are set by a
-// configuration file (rather than default, environment variable, etc.).
-func RetrieveSysSettingsConfFile(db *sql.DB) (PGSettings, error) {
- return retrieveSysSettings(internal.RetrieveSysSettingsConfFile, db)
-}
-
-// RetrieveSysSettingsPerformance retrieves performance-related settings.
-func RetrieveSysSettingsPerformance(db *sql.DB) (PGSettings, error) {
+// retrieveSysSettingsPerformance retrieves performance-related settings.
+func retrieveSysSettingsPerformance(db *sql.DB) (PGSettings, error) {
return retrieveSysSettings(internal.RetrieveSysSettingsPerformance, db)
}
-// RetrieveSysSettingsServer a key server configuration settings (config_file,
+// retrieveSysSettingsServer a key server configuration settings (config_file,
// data_directory, max_connections, dynamic_shared_memory_type,
// max_files_per_process, port, unix_socket_directories), which may be helpful
// in debugging connectivity issues or other DB errors.
-func RetrieveSysSettingsServer(db *sql.DB) (PGSettings, error) {
+func retrieveSysSettingsServer(db *sql.DB) (PGSettings, error) {
return retrieveSysSettings(internal.RetrieveSysSettingsServer, db)
}
-// RetrieveSysSettingSyncCommit retrieves the synchronous_commit setting.
-func RetrieveSysSettingSyncCommit(db *sql.DB) (syncCommit string, err error) {
+// retrieveSysSettingSyncCommit retrieves the synchronous_commit setting.
+func retrieveSysSettingSyncCommit(db *sql.DB) (syncCommit string, err error) {
err = db.QueryRow(internal.RetrieveSyncCommitSetting).Scan(&syncCommit)
return
}
diff --git a/db/dcrpg/system_online_test.go b/db/dcrpg/system_online_test.go
index 3e2db70a3..3a6885e2f 100644
--- a/db/dcrpg/system_online_test.go
+++ b/db/dcrpg/system_online_test.go
@@ -3,36 +3,27 @@
package dcrpg
import (
- "database/sql"
"testing"
)
-func TestRetrieveSysSettingsConfFile(t *testing.T) {
- ss, err := RetrieveSysSettingsConfFile(db.db)
- if err != nil && err != sql.ErrNoRows {
- t.Errorf("Failed to retrieve system settings: %v", err)
- }
- t.Logf("\n%v", ss)
-}
-
-func TestRetrieveSysSettingsPerformance(t *testing.T) {
- ss, err := RetrieveSysSettingsPerformance(db.db)
+func Test_retrieveSysSettingsPerformance(t *testing.T) {
+ ss, err := retrieveSysSettingsPerformance(db.db)
if err != nil {
t.Errorf("Failed to retrieve system settings: %v", err)
}
t.Logf("\n%v", ss)
}
-func TestRetrieveSysSettingsServer(t *testing.T) {
- ss, err := RetrieveSysSettingsServer(db.db)
+func Test_retrieveSysSettingsServer(t *testing.T) {
+ ss, err := retrieveSysSettingsServer(db.db)
if err != nil {
t.Errorf("Failed to retrieve system server: %v", err)
}
t.Logf("\n%v", ss)
}
-func TestRetrievePGVersion(t *testing.T) {
- ver, verNum, err := RetrievePGVersion(db.db)
+func Test_retrievePGVersion(t *testing.T) {
+ ver, verNum, err := retrievePGVersion(db.db)
if err != nil {
t.Errorf("Failed to retrieve postgres version: %v", err)
}
diff --git a/db/dcrpg/tables.go b/db/dcrpg/tables.go
index 78e7a6015..351e92348 100644
--- a/db/dcrpg/tables.go
+++ b/db/dcrpg/tables.go
@@ -82,7 +82,11 @@ func DropTables(db *sql.DB) {
// DropTestingTable drops only the "testing" table.
func DropTestingTable(db SqlExecutor) error {
- _, err := db.Exec(`DROP TABLE IF EXISTS testing;`)
+ _, err := db.Exec(`DROP TABLE IF EXISTS hashtest;`)
+ if err != nil {
+ return err
+ }
+ _, err = db.Exec(`DROP TABLE IF EXISTS testing;`)
return err
}
@@ -196,24 +200,24 @@ func CheckColumnDataType(db *sql.DB, table, column string) (dataType string, err
return
}
-// DeleteDuplicates attempts to delete "duplicate" rows in tables
+// deleteDuplicates attempts to delete "duplicate" rows in tables
// where unique indexes are to be created.
-func (pgb *ChainDB) DeleteDuplicates(barLoad chan *dbtypes.ProgressBarLoad) error {
+func (pgb *ChainDB) deleteDuplicates(barLoad chan *dbtypes.ProgressBarLoad) error {
allDuplicates := []dropDuplicatesInfo{
// Remove duplicate vins
- {TableName: "vins", DropDupsFunc: pgb.DeleteDuplicateVins},
+ {TableName: "vins", DropDupsFunc: pgb.deleteDuplicateVins},
// Remove duplicate vouts
- {TableName: "vouts", DropDupsFunc: pgb.DeleteDuplicateVouts},
+ {TableName: "vouts", DropDupsFunc: pgb.deleteDuplicateVouts},
// Remove duplicate transactions
- {TableName: "transactions", DropDupsFunc: pgb.DeleteDuplicateTxns},
+ {TableName: "transactions", DropDupsFunc: pgb.deleteDuplicateTxns},
// Remove duplicate agendas
- {TableName: "agendas", DropDupsFunc: pgb.DeleteDuplicateAgendas},
+ {TableName: "agendas", DropDupsFunc: pgb.deleteDuplicateAgendas},
// Remove duplicate agenda_votes
- {TableName: "agenda_votes", DropDupsFunc: pgb.DeleteDuplicateAgendaVotes},
+ {TableName: "agenda_votes", DropDupsFunc: pgb.deleteDuplicateAgendaVotes},
}
var err error
diff --git a/db/dcrpg/types_online_test.go b/db/dcrpg/types_online_test.go
index 29d99d3ec..7dc3132f6 100644
--- a/db/dcrpg/types_online_test.go
+++ b/db/dcrpg/types_online_test.go
@@ -3,12 +3,47 @@
package dcrpg
import (
+ "crypto/rand"
"testing"
"time"
"github.com/decred/dcrdata/v8/db/dbtypes"
)
+func TestChainHash_ScanValue(t *testing.T) {
+ // Clear the testing table.
+ if err := ClearTestingTable(sqlDb); err != nil {
+ t.Fatalf("Failed to clear the testing table: %v", err)
+ }
+
+ err := createTable(sqlDb, "hashtest", `CREATE TABLE IF NOT EXISTS hashtest (
+ id SERIAL8 PRIMARY KEY,
+ hash BYTEA
+ );`)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var ch dbtypes.ChainHash
+ rand.Read(ch[:])
+
+ t.Logf("Initial hash %v", ch)
+
+ var id uint64
+ err = sqlDb.QueryRow(`INSERT INTO hashtest (hash) VALUES ($1) RETURNING id`, ch).Scan(&id)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var chScanned dbtypes.ChainHash
+ err = sqlDb.QueryRow(`SELECT hash FROM hashtest`).Scan(&chScanned)
+ if err != nil {
+ t.Error(err)
+ }
+
+ t.Logf("Scanned hash %v", chScanned) // Scanned TimeDef at 1454954400. Location set to: UTC <== correct
+}
+
var (
// Two times in different locations for the same instant in time.
trefLocal = time.Unix(trefUNIX, 0).Local()
diff --git a/db/dcrpg/upgrades.go b/db/dcrpg/upgrades.go
index 86f4bcdd3..08d47dfb8 100644
--- a/db/dcrpg/upgrades.go
+++ b/db/dcrpg/upgrades.go
@@ -7,16 +7,10 @@ import (
"context"
"database/sql"
"fmt"
- "os"
- "github.com/decred/dcrd/blockchain/stake/v5"
- "github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v3"
- "github.com/decred/dcrd/wire"
"github.com/decred/dcrdata/db/dcrpg/v8/internal"
"github.com/decred/dcrdata/v8/stakedb"
- "github.com/decred/dcrdata/v8/txhelpers"
- "github.com/lib/pq"
)
// The database schema is versioned in the meta table as follows.
@@ -25,14 +19,14 @@ const (
// upgrades. A complete DB rebuild is required if this version changes. This
// should change very rarely, but when it does change all of the upgrades
// defined here should be removed since they are no longer applicable.
- compatVersion = 1
+ compatVersion = 2
// schemaVersion pertains to a sequence of incremental upgrades to the
// database schema that may be performed for the same compatibility version.
// This includes changes such as creating tables, adding/deleting columns,
// adding/deleting indexes or any other operations that create, delete, or
// modify the definition of any database relation.
- schemaVersion = 11
+ schemaVersion = 0
// maintVersion indicates when certain maintenance operations should be
// performed for the same compatVersion and schemaVersion. Such operations
@@ -140,25 +134,25 @@ type metaData struct {
netName string
currencyNet uint32
bestBlockHeight int64
- bestBlockHash string
- dbVer DatabaseVersion
- ibdComplete bool
+ // bestBlockHash dbtypes.ChainHash
+ dbVer DatabaseVersion
+ // ibdComplete bool
}
-func insertMetaData(db *sql.DB, meta *metaData) error {
- _, err := db.Exec(internal.InsertMetaRow, meta.netName, meta.currencyNet,
- meta.bestBlockHeight, meta.bestBlockHash,
+func initMetaData(db *sql.DB, meta *metaData) error {
+ _, err := db.Exec(internal.InitMetaRow, meta.netName, meta.currencyNet,
+ meta.bestBlockHeight, // meta.bestBlockHash,
meta.dbVer.compat, meta.dbVer.schema, meta.dbVer.maint,
- meta.ibdComplete)
+ false /* meta.ibdComplete */)
return err
}
-func updateSchemaVersion(db *sql.DB, schema uint32) error {
+func updateSchemaVersion(db *sql.DB, schema uint32) error { //nolint:unused
_, err := db.Exec(internal.SetDBSchemaVersion, schema)
return err
}
-func updateMaintVersion(db *sql.DB, maint uint32) error {
+func updateMaintVersion(db *sql.DB, maint uint32) error { //nolint:unused
_, err := db.Exec(internal.SetDBMaintenanceVersion, maint)
return err
}
@@ -212,14 +206,14 @@ func (u *Upgrader) UpgradeDatabase() (bool, error) {
func (u *Upgrader) upgradeDatabase(current, target DatabaseVersion) (bool, error) {
switch current.compat {
- case 1:
- return u.compatVersion1Upgrades(current, target)
+ case 2:
+ return u.compatVersion2Upgrades(current, target)
default:
return false, fmt.Errorf("unsupported DB compatibility version %d", current.compat)
}
}
-func (u *Upgrader) compatVersion1Upgrades(current, target DatabaseVersion) (bool, error) {
+func (u *Upgrader) compatVersion2Upgrades(current, target DatabaseVersion) (bool, error) {
upgradeCheck := func() (done bool, err error) {
switch current.NeededToReach(&target) {
case OK:
@@ -245,171 +239,31 @@ func (u *Upgrader) compatVersion1Upgrades(current, target DatabaseVersion) (bool
}
// Process schema upgrades and table maintenance.
- initSchema := current.schema
+ // initSchema := current.schema
switch current.schema {
- case 0: // legacyDatabaseVersion
- // Remove table comments where the versions were stored.
- log.Infof("Performing database upgrade 1.0.0 -> 1.1.0")
- removeTableComments(u.db)
-
- // Bump schema version.
- current.schema++
- if err = updateSchemaVersion(u.db, current.schema); err != nil {
- return false, fmt.Errorf("failed to update schema version: %v", err)
- }
-
- // Continue to upgrades for the next schema version.
- fallthrough
- case 1:
- // Upgrade to schema v2.
- err = u.upgradeSchema1to2()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.1.0 to 1.2.0: %v", err)
- }
- current.schema++
- if err = updateSchemaVersion(u.db, current.schema); err != nil {
- return false, fmt.Errorf("failed to update schema version: %v", err)
- }
- fallthrough
- case 2:
- // Upgrade to schema v3.
- err = u.upgradeSchema2to3()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.2.0 to 1.3.0: %v", err)
- }
- current.schema++
- if err = updateSchemaVersion(u.db, current.schema); err != nil {
- return false, fmt.Errorf("failed to update schema version: %v", err)
- }
- fallthrough
-
- case 3:
- // Upgrade to schema v4.
- err = u.upgradeSchema3to4()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.3.0 to 1.4.0: %v", err)
- }
- current.schema++
- if err = updateSchemaVersion(u.db, current.schema); err != nil {
- return false, fmt.Errorf("failed to update schema version: %v", err)
- }
- fallthrough
-
- case 4:
- // Upgrade to schema v5.
- err = u.upgradeSchema4to5()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.4.0 to 1.5.0: %v", err)
- }
- current.schema++
- if err = updateSchemaVersion(u.db, current.schema); err != nil {
- return false, fmt.Errorf("failed to update schema version: %v", err)
- }
- fallthrough
-
- case 5:
- // Perform schema v5 maintenance.
- switch current.maint {
- case 0:
- // The maint 0 -> 1 upgrade is only needed if the user had upgraded
- // to 1.5.0 before 1.5.1 was defined.
- log.Infof("Performing database upgrade 1.5.0 -> 1.5.1")
- if initSchema == 5 {
- err = u.setTxMixData()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.5.0 to 1.5.1: %v", err)
- }
- }
- current.maint++
- if err = updateMaintVersion(u.db, current.maint); err != nil {
- return false, fmt.Errorf("failed to update maintenance version: %v", err)
- }
- fallthrough
- case 1:
- // all ready
- default:
- return false, fmt.Errorf("unsupported maint version %d", current.maint)
- }
-
- // Upgrade to schema v6.
- err = u.upgradeSchema5to6()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.5.1 to 1.6.0: %v", err)
- }
- current.schema++
- current.maint = 0
- if storeVers(u.db, &current); err != nil {
- return false, err
- }
+ case 0:
+ return true, nil // nothing to do
- fallthrough
-
- case 6:
- err = u.upgradeSchema6to7()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.6.0 to 1.7.0: %v", err)
- }
- current.schema++
- current.maint = 0
- if storeVers(u.db, &current); err != nil {
- return false, err
- }
+ /* when there's an upgrade to define:
+ case 0:
+ // Remove table comments where the versions were stored.
+ log.Infof("Performing database upgrade 2.0.0 -> 2.1.0")
- fallthrough
+ // removeTableComments(u.db) // do something here
- case 7:
err = u.upgradeSchema7to8()
if err != nil {
return false, fmt.Errorf("failed to upgrade 1.7.0 to 1.8.0: %v", err)
}
current.schema++
current.maint = 0
- if storeVers(u.db, &current); err != nil {
- return false, err
- }
-
- fallthrough
-
- case 8:
- err = u.upgradeSchema8to9()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.8.0 to 1.9.0: %v", err)
- }
- current.schema++
- current.maint = 0
- if storeVers(u.db, ¤t); err != nil {
+ if err = storeVers(u.db, &current); err != nil {
return false, err
}
fallthrough
- case 9:
- err = u.upgradeSchema9to10()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.9.0 to 1.10.0: %v", err)
- }
- current.schema++
- current.maint = 0
- if storeVers(u.db, &current); err != nil {
- return false, err
- }
-
- fallthrough
-
- case 10:
- err = u.upgradeSchema10to11()
- if err != nil {
- return false, fmt.Errorf("failed to upgrade 1.10.0 to 1.11.0: %v", err)
- }
- current.schema++
- current.maint = 0
- if storeVers(u.db, &current); err != nil {
- return false, err
- }
-
- fallthrough
-
- case 11:
+ case 1:
// Perform schema v11 maintenance.
// No further upgrades.
@@ -417,12 +271,14 @@ func (u *Upgrader) compatVersion1Upgrades(current, target DatabaseVersion) (bool
// Or continue to upgrades for the next schema version.
// fallthrough
+ */
+
default:
return false, fmt.Errorf("unsupported schema version %d", current.schema)
}
}
-func storeVers(db *sql.DB, dbVer *DatabaseVersion) error {
+func storeVers(db *sql.DB, dbVer *DatabaseVersion) error { //nolint:unused
err := updateSchemaVersion(db, dbVer.schema)
if err != nil {
return fmt.Errorf("failed to update schema version: %w", err)
@@ -431,541 +287,10 @@ func storeVers(db *sql.DB, dbVer *DatabaseVersion) error {
return fmt.Errorf("failed to update maintenance version: %w", err)
}
-func removeTableComments(db *sql.DB) {
- for _, pair := range createTableStatements {
- tableName := pair[0]
- _, err := db.Exec(fmt.Sprintf(`COMMENT ON table %s IS NULL;`, tableName))
- if err != nil {
- log.Errorf(`Failed to remove comment on table %s.`, tableName)
- }
- }
-}
-
-func (u *Upgrader) upgradeSchema10to11() error {
- log.Infof("Performing database upgrade 1.10.0 -> 1.11.0")
- // The status table already had an index created automatically because of
- // the UNIQUE constraint declaration for the heights column. Remove the
- // redundant index uix_stats_height on stats(height).
- return DeindexStatsTableOnHeight(u.db)
-}
-
-func (u *Upgrader) upgradeSchema9to10() (err error) {
- log.Infof("Performing database upgrade 1.9.0 -> 1.10.0")
-
- exists, err := TableExists(u.db, "swaps")
- if err != nil {
- return err
- }
- if !exists {
- // NOTE: cannot create table in a DB transaction and use it.
- _, err = u.db.Exec(internal.CreateAtomicSwapTableV0)
- if err != nil {
- return fmt.Errorf("CreateStatsTable: %w", err)
- }
-
- _, err = u.db.Exec(internal.IndexSwapsOnHeightV0)
- if err != nil {
- return fmt.Errorf("IndexSwapsOnHeight: %v", err)
- }
- }
-
- dbTx, err := u.db.Begin()
- if err != nil {
- return fmt.Errorf("failed to create db transaction: %w", err)
- }
- defer func() {
- if err == nil {
- err = dbTx.Commit()
- return
- }
- if errRoll := dbTx.Rollback(); errRoll != nil {
- log.Errorf("Rollback failed: %v", errRoll)
- // but keep err
- }
- }()
-
- makeErr := func(s string, args ...interface{}) error {
- err = fmt.Errorf(s, args...)
- return err
- }
-
- // Start with a height-ordered list of block data.
- blockRows, err := u.db.Query(`
- SELECT hash, height
- FROM blocks
- WHERE is_mainchain
- ORDER BY height ASC
- ;`)
- if err != nil {
- return makeErr("block hash query error: %w", err)
- }
- defer blockRows.Close()
-
- var redeems, refunds int
-
- var checkHeight int64
- for blockRows.Next() {
- if u.ctx.Err() != nil {
- return makeErr("context cancelled. rolling back update")
- }
-
- var height int64
- var hashStr string
- err = blockRows.Scan(&hashStr, &height)
- if err != nil {
- return makeErr("blockRows.Scan: %w", err)
- }
- hash, err := chainhash.NewHashFromStr(hashStr)
- if err != nil {
- return makeErr("NewHashFromStr: %w", err)
- }
- // If the height is not the expected height, the database must be corrupted.
- if height != checkHeight {
- return makeErr("height mismatch %d != %d. database corrupted!", height, checkHeight)
- }
- checkHeight++
- // A periodic update message.
- if height%10000 == 0 {
- log.Infof("Processing atomic swaps in blocks [%d,%d)", height, height+10000)
- }
-
- msgBlock, err := u.bg.GetBlock(u.ctx, hash)
- if err != nil {
- return makeErr("GetBlock(%v): %w", hash, err)
- }
-
- for _, tx := range msgBlock.Transactions[1:] { // skip the coinbase
- // This will only identify the redeem and refund txns, unlike
- // the use of TxAtomicSwapsInfo in API and explorer calls.
- swapTxns, err := txhelpers.MsgTxAtomicSwapsInfo(tx, nil, u.params)
- if err != nil {
- log.Warnf("MsgTxAtomicSwapsInfo: %v", err)
- continue
- }
- if swapTxns == nil || swapTxns.Found == "" {
- continue
- }
- for _, red := range swapTxns.Redemptions {
- err = InsertSwap(u.db, height, red)
- if err != nil {
- return makeErr("InsertSwap: %w", err)
- }
- redeems++
- }
- for _, ref := range swapTxns.Refunds {
- err = InsertSwap(u.db, height, ref)
- if err != nil {
- return makeErr("InsertSwap: %w", err)
- }
- refunds++
- }
- }
-
- }
-
- if err = blockRows.Err(); err != nil {
- return makeErr("blockRows.Err: %w", err)
- }
-
- log.Infof("Inserted %d contract redeems, %d contract refunds.", redeems, refunds)
-
- return nil
-}
-
-func (u *Upgrader) upgradeSchema8to9() error {
- log.Infof("Performing database upgrade 1.8.0 -> 1.9.0")
-
- // Create and index the treasury table.
- _, err := u.db.Exec(
- `CREATE TABLE treasury AS
- SELECT tx_hash, tx_type, spent AS value, block_hash, block_height, block_time, is_mainchain
- FROM transactions
- WHERE tx_type = ANY($1);`,
- pq.Int32Array([]int32{int32(stake.TxTypeTAdd), int32(stake.TxTypeTSpend), int32(stake.TxTypeTreasuryBase)}),
- )
- if err != nil {
- return fmt.Errorf("CreateTreasuryTable: %w", err)
- }
-
- // Make TSPEND value negative.
- _, err = u.db.Exec(`UPDATE treasury SET value = -value WHERE tx_type=$1;`,
- int(stake.TxTypeTSpend))
- if err != nil {
- return fmt.Errorf("updating tspend values failed: %v", err)
- }
-
- // Set TADD value properly from vout 0 value.
- _, err = u.db.Exec(`UPDATE treasury SET value = vouts.value FROM vouts
- WHERE tx_type=$1 AND treasury.tx_hash=vouts.tx_hash AND vouts.tx_index=0;`,
- int(stake.TxTypeTAdd))
- if err != nil {
- return fmt.Errorf("updating tadd values failed: %w", err)
- }
-
- _, err = u.db.Exec(internal.IndexTreasuryOnTxHash)
- if err != nil {
- return fmt.Errorf("IndexTreasuryOnTxHash: %w", err)
- }
-
- _, err = u.db.Exec(internal.IndexTreasuryOnBlockHeight)
- if err != nil {
- return fmt.Errorf("IndexTreasuryOnBlockHeight: %w", err)
- }
-
- // Import treasury txns from the transactions table.
-
- return nil
-}
-
-func (u *Upgrader) upgradeSchema7to8() error {
- log.Infof("Performing database upgrade 1.7.0 -> 1.8.0")
- // Index the transactions table on block height. This drastically
- // accelerates several queries including those for the following charts
- // updaters: fees, coin supply, privacy participation, and anonymity set.
- return IndexTransactionTableOnBlockHeight(u.db)
-}
-
-func (u *Upgrader) upgradeSchema6to7() error {
- log.Infof("Performing database upgrade 1.6.0 -> 1.7.0")
- // Create the missing vouts.spend_tx_row_id index.
- return IndexVoutTableOnSpendTxID(u.db)
-}
-
-func (u *Upgrader) upgradeSchema5to6() error {
- // Add the mixed column to vouts table.
- log.Infof("Performing database upgrade 1.5.1 -> 1.6.0")
- _, err := u.db.Exec(`ALTER TABLE vouts
- ADD COLUMN mixed BOOLEAN DEFAULT FALSE,
- ADD COLUMN spend_tx_row_id INT8;`)
- if err != nil {
- return fmt.Errorf("ALTER TABLE vouts error: %v", err)
- }
-
- // Set the vouts.mixed column based on transactions.mix_denom and
- // transactions.vout_db_ids and vouts.value.
- log.Infof("Setting vouts.mixed (BOOLEAN) column for mixing transaction outputs with mix_denom value...")
- _, err = u.db.Exec(`UPDATE vouts SET mixed=true
- FROM transactions
- WHERE vouts.id=ANY(transactions.vout_db_ids)
- AND vouts.value=transactions.mix_denom
- AND transactions.mix_denom>0;`)
- if err != nil {
- return fmt.Errorf("UPDATE vouts.mixed error: %v", err)
- }
-
- // Set vouts.spend_tx_row_id using vouts.tx_hash, vins.prev_tx_hash, and
- // transactions.tx_hash.
- log.Infof("Setting vouts.spend_tx_row_id (INT8) column. This will take a while...")
- var N int64
- N, err = updateSpendTxInfoInAllVouts(u.db)
- if err != nil {
- return fmt.Errorf("UPDATE vouts.spend_tx_row_id error: %v", err)
- }
- log.Debugf("Updated %d rows of vouts table.", N)
-
- // var rows *sql.Rows
- // rows, err = u.db.Query(`SELECT vouts.id AS vout_id, transactions.block_height AS spend_height
- // FROM vouts
- // JOIN vins ON vouts.tx_hash=vins.prev_tx_hash AND mixed=TRUE AND vins.is_mainchain=TRUE
- // JOIN transactions ON transactions.tx_hash=vins.tx_hash;`)
- // if err != nil {
- // return fmt.Errorf("SELECT error: %v", err)
- // }
- // defer rows.Close()
-
- // var voutIDs, spendHeights []int64
- // for rows.Next() {
- // var voutID, spendHeight int64
- // err = rows.Scan(&voutID, &spendHeight)
- // if err != nil {
- // return fmt.Errorf("Scan error: %v", err)
- // }
- // voutIDs = append(voutIDs, voutID)
- // spendHeights = append(spendHeights, spendHeight)
- // }
-
- // for i := range voutIDs {
- // var N int64
- // N, err = sqlExec(u.db, `UPDATE vouts SET spend_height = $1 WHERE id=$2`, "UPDATE vouts error",
- // spendHeights[i], voutIDs[i])
- // if err != nil {
- // return err
- // }
- // if N != 1 {
- // return fmt.Errorf("failed to update 1 row, updated %d", N)
- // }
- // }
-
- // For all mixed vouts where spending tx is type stake.TxTypeSStx (a
- // ticket), set the ticket's vouts as mixed.
- log.Infof("Setting vouts.mixed (BOOLEAN) column for tickets funded by mixing split txns...")
- _, err = u.db.Exec(`UPDATE vouts SET mixed=TRUE
- FROM (SELECT DISTINCT ON(transactions.id) transactions.vout_db_ids
- FROM vouts
- JOIN transactions
- ON vouts.spend_tx_row_id=transactions.id
- AND vouts.mixed=true
- AND transactions.tx_type=1) AS mix_funded_tickets
- WHERE vouts.id=ANY(mix_funded_tickets.vout_db_ids)
- AND vouts.value > 0;`)
- if err != nil {
- return fmt.Errorf("UPDATE ticket vouts error: %v", err)
- }
-
- // For all mixed vouts where spending tx is type stake.TxTypeGen (a vote),
- // set the vote's vouts as mixed.
- log.Infof("Setting vouts.mixed (BOOLEAN) column for votes and revokes funded by tickets funded by mixing split txns...")
- _, err = u.db.Exec(`UPDATE vouts SET mixed=TRUE
- FROM (SELECT DISTINCT ON(transactions.id) transactions.vout_db_ids
- FROM vouts
- JOIN transactions
- ON vouts.spend_tx_row_id=transactions.id
- AND vouts.mixed=true
- AND (transactions.tx_type=2 OR transactions.tx_type=3)) AS mix_funded_votes
- WHERE vouts.id=ANY(mix_funded_votes.vout_db_ids)
- AND vouts.value > 0;`)
- if err != nil {
- return fmt.Errorf("UPDATE vote vouts error: %v", err)
- }
-
- // NOTE: fund and spend heights of mix transaction outputs
- // `SELECT vouts.value, fund_tx.block_height, spend_tx.block_height
- // FROM vouts
- // JOIN transactions AS fund_tx ON vouts.tx_hash=fund_tx.tx_hash
- // JOIN transactions AS spend_tx ON spend_tx_row_id=spend_tx.id
- // WHERE mixed=true;`
-
- return nil
-}
-
-func (u *Upgrader) upgradeSchema4to5() error {
- // Add the mix_count and mix_denom columns to the transactions table.
- log.Infof("Performing database upgrade 1.4.0 -> 1.5.0")
- _, err := u.db.Exec(`ALTER TABLE transactions
- ADD COLUMN IF NOT EXISTS mix_count INT4 DEFAULT 0,
- ADD COLUMN IF NOT EXISTS mix_denom INT8 DEFAULT 0;`)
- if err != nil {
- return fmt.Errorf("ALTER TABLE transactions error: %v", err)
- }
-
- return u.setTxMixData()
-}
-
-func (u *Upgrader) setTxMixData() error {
- log.Infof("Retrieving possible mix transactions...")
- txnRows, err := u.db.Query(`SELECT transactions.id, transactions.tx_hash, array_agg(value), min(blocks.sbits)
- FROM transactions
- JOIN vouts ON vouts.id=ANY(vout_db_ids)
- JOIN blocks ON blocks.hash = transactions.block_hash
- WHERE tree = 0 AND num_vout>=3
- GROUP BY transactions.id;`)
- if err != nil {
- return fmt.Errorf("transaction query error: %v", err)
- }
-
- var mixIDs []int64
- var mixDenoms []int64
- var mixCounts []uint32
-
- msgTx := new(wire.MsgTx)
- for txnRows.Next() {
- var vals []int64
- var hash string
- var id, ticketPrice int64
- err = txnRows.Scan(&id, &hash, pq.Array(&vals), &ticketPrice)
- if err != nil {
- txnRows.Close()
- return fmt.Errorf("Scan failed: %v", err)
- }
-
- txouts := make([]*wire.TxOut, 0, len(vals))
- txins := make([]*wire.TxIn, 0, len(vals))
- for _, v := range vals {
- txouts = append(txouts, &wire.TxOut{
- Value: v,
- })
- txins = append(txins, &wire.TxIn{ /*dummy*/ })
- }
- msgTx.TxOut = txouts
- msgTx.TxIn = txins
-
- _, mixDenom, mixCount := txhelpers.IsMixTx(msgTx)
- if mixCount == 0 {
- _, mixDenom, mixCount = txhelpers.IsMixedSplitTx(msgTx, int64(txhelpers.DefaultRelayFeePerKb), ticketPrice)
- if mixCount == 0 {
- continue
- }
- }
-
- mixIDs = append(mixIDs, id)
- mixDenoms = append(mixDenoms, mixDenom)
- mixCounts = append(mixCounts, mixCount)
- }
-
- txnRows.Close()
-
- stmt, err := u.db.Prepare(`UPDATE transactions SET mix_count = $2, mix_denom = $3 WHERE id = $1;`)
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- log.Infof("Updating transaction data for %d mix transactions...", len(mixIDs))
- for i := range mixIDs {
- N, err := sqlExecStmt(stmt, "failed to update transaction: ", mixIDs[i], mixCounts[i], mixDenoms[i])
- if err != nil {
- return err
- }
- if N != 1 {
- log.Warnf("Updated %d transactions rows instead of 1", N)
- }
- }
-
- return err
-}
-
-// This changes the data type of votes.version from INT2 to INT4.
-func (u *Upgrader) upgradeSchema3to4() error {
- // Change the data type of votes.version.
- log.Infof("Performing database upgrade 1.3.0 -> 1.4.0")
- _, err := u.db.Exec(`ALTER TABLE votes ALTER COLUMN version TYPE INT4`)
- return err
-}
-
-// This indexes the blocks table on the "time" column.
-func (u *Upgrader) upgradeSchema2to3() error {
- // Create the stats table and height index.
- log.Infof("Performing database upgrade 1.2.0 -> 1.3.0")
-
- existsIdx, err := ExistsIndex(u.db, internal.IndexBlocksTableOnTime)
- if err != nil {
- return err
- }
- if existsIdx {
- log.Warnf("The index %s already exists!", internal.IndexOfBlocksTableOnTime)
- return nil
- }
-
- return IndexBlockTableOnTime(u.db)
-}
-
-// This upgrade creates a stats table and adds a winners row to the blocks table
-// necessary to replace information from the sqlite database, which is being
-// dropped. As part of the upgrade, the entire blockchain must be requested and
-// the ticket pool evolved appropriately.
-func (u *Upgrader) upgradeSchema1to2() error {
- // Create the stats table and height index.
- log.Infof("Performing database upgrade 1.1.0 -> 1.2.0")
- exists, err := TableExists(u.db, "stats")
- if err != nil {
- return err
- }
- if !exists {
- _, err = u.db.Exec(internal.CreateStatsTable)
- if err != nil {
- return fmt.Errorf("CreateStatsTable: %v", err)
- }
- _, err = u.db.Exec(internal.IndexStatsOnHeight)
- if err != nil {
- return fmt.Errorf("IndexStatsOnHeight: %v", err)
- }
- _, err = u.db.Exec(`ALTER TABLE blocks ADD COLUMN IF NOT EXISTS winners TEXT[];`)
- if err != nil {
- return fmt.Errorf("Add winners column error: %v", err)
- }
- }
- // Do everything else under a transaction.
- dbTx, err := u.db.Begin()
- if err != nil {
- return fmt.Errorf("failed to create db transaction: %v", err)
- }
- defer func() {
- if err == nil {
- dbTx.Commit()
- } else {
- dbTx.Rollback()
- }
- }()
- makeErr := func(s string, args ...interface{}) error {
- err = fmt.Errorf(s, args...)
- return err
- }
- // Start with a height-ordered list of block data.
- blockRows, err := u.db.Query(`
- SELECT id, hash, height
- FROM blocks
- WHERE is_mainchain
- ORDER BY height
- ;`)
- if err != nil {
- return makeErr("block hash query error: %v", err)
- }
- defer blockRows.Close()
- // Set the stake database to the genesis block.
- dir, err := os.MkdirTemp("", "tempstake")
- if err != nil {
- return makeErr("unable to create temp directory")
- }
- defer os.RemoveAll(dir)
- sDB, _, err := u.stakeDB.EmptyCopy(dir)
- if err != nil {
- return makeErr("stake db init error: %v", err)
- }
- // Two prepared statements.
- statsStmt, err := dbTx.Prepare(internal.UpsertStats)
- if err != nil {
- return makeErr("failed to prepare stats insert statement: %v", err)
- }
- // sql does not deal with PostgreSQL array syntax, it must be Sprintf'd.
- winnersStmt, err := dbTx.Prepare("UPDATE blocks SET winners = $1 where hash = $2;")
- if err != nil {
- return makeErr("failed to prepare winners insert statement: %v", err)
- }
-
- checkHeight := 0
- var hashStr string
- var id, height int
- for blockRows.Next() {
- if u.ctx.Err() != nil {
- return makeErr("context cancelled. rolling back update")
- }
- blockRows.Scan(&id, &hashStr, &height)
- hash, err := chainhash.NewHashFromStr(hashStr)
- if err != nil {
- return makeErr("NewHashFromStr: %v", err)
- }
- // If the height is not the expected height, the database must be corrupted.
- if height != checkHeight {
- return makeErr("height mismatch %d != %d. database corrupted!", height, checkHeight)
- }
- checkHeight += 1
- // A periodic update message.
- if height%10000 == 0 {
- log.Infof("Processing blocks %d - %d", height, height+9999)
- }
- // Connecting the block updates the live ticket cache and ticket info cache.
- // The StakeDatabase is pre-populated with the genesis block, so skip it.
- if height > 0 {
- _, err = sDB.ConnectBlockHash(hash)
- if err != nil {
- return makeErr("ConnectBlockHash: %v", err)
- }
- }
-
- // The "best" pool info is for the chain at the tip just added.
- poolInfo := sDB.PoolInfoBest()
- if poolInfo == nil {
- return makeErr("PoolInfoBest error encountered")
- }
- // Insert rows.
- _, err = statsStmt.Exec(id, height, poolInfo.Size, int64(poolInfo.Value*dcrToAtoms))
- if err != nil {
- return makeErr("insert Exec: %v", err)
- }
- _, err = winnersStmt.Exec(pq.Array(poolInfo.Winners), hashStr)
- if err != nil {
- return makeErr("update Exec: %v", err)
- }
- }
- return nil
+/* define when needed
+func (u *Upgrader) upgradeSchema0to1() error {
+ log.Infof("Performing database upgrade 2.0.0 -> 2.1.0")
+ // describe the actions...
+ return whatever(u.db)
}
+*/
diff --git a/exchanges/bot.go b/exchanges/bot.go
index 9b4621f7b..93de98167 100644
--- a/exchanges/bot.go
+++ b/exchanges/bot.go
@@ -300,6 +300,16 @@ func NewExchangeBot(config *ExchangeBotConfig) (*ExchangeBot, error) {
failed: false,
}
+ /* TODO: specify proxy
+ proxyURL, err := url.Parse("socks5://127.0.0.1:1080")
+ if err != nil {
+ return nil, err
+ }
+ bot.client.Transport = &http.Transport{
+ Proxy: http.ProxyURL(proxyURL),
+ }
+ */
+
if config.MasterBot != "" {
if config.MasterCertFile == "" {
return nil, fmt.Errorf("No TLS certificate path provided")
diff --git a/exchanges/go.mod b/exchanges/go.mod
index 06ff2d91f..dc8cd6030 100644
--- a/exchanges/go.mod
+++ b/exchanges/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/exchanges/v3
-go 1.18
+go 1.21
require (
decred.org/dcrdex v0.6.1
diff --git a/exchanges/go.sum b/exchanges/go.sum
index 02d05114f..b851cadee 100644
--- a/exchanges/go.sum
+++ b/exchanges/go.sum
@@ -186,6 +186,7 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
+github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -340,6 +341,7 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
+github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -432,6 +434,7 @@ github.com/go-chi/chi/v5 v5.0.4/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITL
github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -570,6 +573,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -1150,6 +1154,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
@@ -1774,6 +1779,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
@@ -1809,7 +1815,9 @@ gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/exchanges/rateserver/go.mod b/exchanges/rateserver/go.mod
index de4e6de05..e810eb860 100644
--- a/exchanges/rateserver/go.mod
+++ b/exchanges/rateserver/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/exchanges/rateserver
-go 1.18
+go 1.21
replace github.com/decred/dcrdata/exchanges/v3 => ../
diff --git a/exchanges/rateserver/go.sum b/exchanges/rateserver/go.sum
index 5a3322e7b..4703458d9 100644
--- a/exchanges/rateserver/go.sum
+++ b/exchanges/rateserver/go.sum
@@ -186,6 +186,7 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
+github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -340,6 +341,7 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
+github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -432,6 +434,7 @@ github.com/go-chi/chi/v5 v5.0.4/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITL
github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -570,6 +573,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -1151,6 +1155,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
@@ -1775,6 +1780,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
@@ -1810,7 +1816,9 @@ gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/exchanges/rateserver/types.go b/exchanges/rateserver/types.go
index 4c3b789bd..bb95a42b6 100644
--- a/exchanges/rateserver/types.go
+++ b/exchanges/rateserver/types.go
@@ -225,8 +225,3 @@ func (client *rateClient) SendExchangeUpdate(update *dcrrates.ExchangeRateUpdate
func (client *rateClient) Stream() GRPCStream {
return client.stream
}
-
-// Determine if the grpc.ServerStream's context Done() channel has been closed.
-func (client *rateClient) isDone() bool {
- return client.stream.Context().Err() != nil
-}
diff --git a/go.mod b/go.mod
index c47f85690..fb16e8de3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/v8
-go 1.18
+go 1.21
require (
github.com/davecgh/go-spew v1.1.1
@@ -17,6 +17,7 @@ require (
github.com/decred/dcrd/wire v1.6.0
github.com/decred/slog v1.2.0
github.com/dgraph-io/badger v1.6.2
+ github.com/lib/pq v1.10.9
golang.org/x/net v0.20.0
)
diff --git a/go.sum b/go.sum
index a7abda8e5..aedae0b34 100644
--- a/go.sum
+++ b/go.sum
@@ -107,6 +107,8 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -185,6 +187,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
diff --git a/gov/go.mod b/gov/go.mod
index 5fe995c3d..4a65af51c 100644
--- a/gov/go.mod
+++ b/gov/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/gov/v6
-go 1.18
+go 1.21
replace github.com/decred/dcrdata/v8 => ../
@@ -46,6 +46,7 @@ require (
github.com/gorilla/schema v1.1.0 // indirect
github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/lib/pq v1.10.9 // indirect
github.com/marcopeereboom/sbox v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
diff --git a/gov/go.sum b/gov/go.sum
index 81503e0f8..7195a0255 100644
--- a/gov/go.sum
+++ b/gov/go.sum
@@ -453,6 +453,7 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
+github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -690,6 +691,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
@@ -1305,6 +1308,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/gov/politeia/proposals.go b/gov/politeia/proposals.go
index 97be6331b..524744cca 100644
--- a/gov/politeia/proposals.go
+++ b/gov/politeia/proposals.go
@@ -532,7 +532,6 @@ func (db *ProposalsDB) proposal(searchBy, searchTerm string) (*pitypes.ProposalR
var proposal pitypes.ProposalRecord
err := db.dbP.Select(q.Eq(searchBy, searchTerm)).Limit(1).First(&proposal)
if err != nil {
- log.Errorf("Failed to fetch data from Proposals DB: %v", err)
return nil, err
}
diff --git a/lint.sh b/lint.sh
index 0b2a340e9..5443bbc7c 100755
--- a/lint.sh
+++ b/lint.sh
@@ -33,20 +33,6 @@ MODPATHS="./go.mod ./exchanges/go.mod ./gov/go.mod ./db/dcrpg/go.mod ./cmd/dcrda
./testutil/apiload/go.mod ./exchanges/rateserver/go.mod"
#MODPATHS=$(find . -name go.mod -type f -print)
-alias superlint="golangci-lint run --deadline=10m \
- --disable-all \
- --enable govet \
- --enable staticcheck \
- --enable gosimple \
- --enable unconvert \
- --enable ineffassign \
- --enable structcheck \
- --enable goimports \
- --enable misspell \
- --enable unparam \
- --enable asciicheck \
- --enable makezero"
-
# run lint on all listed modules
set +e
ERROR=0
@@ -55,7 +41,7 @@ for MODPATH in $MODPATHS; do
module=$(dirname "${MODPATH}")
pushd "$module" > /dev/null
echo "Linting: $MODPATH"
- superlint
+ golangci-lint run
if [[ "$GV" =~ ^1.21 ]]; then
MOD_STATUS=$(git status --porcelain go.mod go.sum)
go mod tidy
diff --git a/pubsub/democlient/go.mod b/pubsub/democlient/go.mod
index 0b7b8a1df..a31df17b4 100644
--- a/pubsub/democlient/go.mod
+++ b/pubsub/democlient/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/pubsub/democlient
-go 1.18
+go 1.21
replace github.com/decred/dcrdata/v8 => ../../
@@ -39,6 +39,7 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/lib/pq v1.10.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
diff --git a/pubsub/democlient/go.sum b/pubsub/democlient/go.sum
index f38be5c15..3b82e9337 100644
--- a/pubsub/democlient/go.sum
+++ b/pubsub/democlient/go.sum
@@ -92,6 +92,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
@@ -107,6 +108,8 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
@@ -149,6 +152,7 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -198,6 +202,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -235,6 +240,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/testutil/apiload/go.mod b/testutil/apiload/go.mod
index c4d473b19..30b3cdd26 100644
--- a/testutil/apiload/go.mod
+++ b/testutil/apiload/go.mod
@@ -1,6 +1,6 @@
module github.com/decred/dcrdata/testutil/apiload
-go 1.18
+go 1.21
require (
github.com/jessevdk/go-flags v1.5.0
diff --git a/testutil/dbload/go.mod b/testutil/dbload/go.mod
index 88d8ad979..420c68b8b 100644
--- a/testutil/dbload/go.mod
+++ b/testutil/dbload/go.mod
@@ -1,10 +1,10 @@
module github.com/decred/dcrdata/testutil/dbload
-go 1.18
+go 1.21
replace github.com/decred/dcrdata/v8 => ../../
require (
github.com/decred/dcrdata/v8 v8.0.0
- github.com/lib/pq v1.10.4
+ github.com/lib/pq v1.10.9
)
diff --git a/testutil/dbload/go.sum b/testutil/dbload/go.sum
index 08b950f0b..aeddeae36 100644
--- a/testutil/dbload/go.sum
+++ b/testutil/dbload/go.sum
@@ -1,2 +1,2 @@
-github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=