Skip to content

Commit

Permalink
Fix for provider id (#396)
Browse files Browse the repository at this point in the history
## Description
<!-- Describe what change this PR is implementing -->

## Types of Changes
Please select the branch type you are merging and fill in the relevant
template.
<!--- Check the following box with an x if the following applies: -->
- [ ] Hotfix
- [ ] Release
- [ ] Fix or Feature

## Fix or Feature
<!--- Check the following box with an x if the following applies: -->

### Types of Changes
<!--- What types of changes does your code introduce? -->
- [ ] Tech Debt (Code improvements)
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to change)
- [ ] Dependency upgrade (A change in substrate or any 3rd party crate
version)

### Migrations and Hooks
<!--- Check the following box with an x if the following applies: -->
- [ ] This change requires a runtime migration.
- [ ] Modifies `on_initialize`
- [ ] Modifies `on_finalize`

### Checklist for Fix or Feature
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Change has been tested locally.
- [ ] Change adds / updates tests if applicable.
- [ ] Changelog doc updated.
- [ ] `spec_version` has been incremented.
- [ ] `network-relayer`'s
[events](https://github.com/Cerebellum-Network/network-relayer/blob/dev-cere/shared/substrate/events.go)
have been updated according to the blockchain events if applicable.
- [ ] All CI checks have passed successfully

## Checklist for Hotfix
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Changelog has been updated.
- [ ] Crate version has been updated.
- [ ] `spec_version` has been incremented.
- [ ] Transaction version has been updated if required.
- [ ] Pull Request to `dev` has been created.
- [ ] Pull Request to `staging` has been created.
- [ ] `network-relayer`'s
[events](https://github.com/Cerebellum-Network/network-relayer/blob/dev-cere/shared/substrate/events.go)
have been updated according to the blockchain events if applicable.
- [ ] All CI checks have passed successfully

## Checklist for Release
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Change has been deployed to Devnet.
- [ ] Change has been tested in Devnet.
- [ ] Change has been deployed to Qanet.
- [ ] Change has been tested in Qanet.
- [ ] Change has been deployed to Testnet.
- [ ] Change has been tested in Testnet.
- [ ] Changelog has been updated.
- [ ] Crate version has been updated.
- [ ] Spec version has been updated.
- [ ] Transaction version has been updated if required.
- [ ] All CI checks have passed successfully
  • Loading branch information
ayushmishra2005 authored Jul 20, 2024
1 parent 1e50ecb commit ee9c426
Show file tree
Hide file tree
Showing 6 changed files with 59 additions and 42 deletions.
2 changes: 1 addition & 1 deletion pallets/ddc-customers/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ pub mod pallet {

/// The current storage version.
const STORAGE_VERSION: frame_support::traits::StorageVersion =
frame_support::traits::StorageVersion::new(1);
frame_support::traits::StorageVersion::new(2);

#[pallet::pallet]
#[pallet::storage_version(STORAGE_VERSION)]
Expand Down
57 changes: 52 additions & 5 deletions pallets/ddc-verification/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,9 @@ pub mod pallet {
FailedToFetchCurrentValidator {
validator: T::AccountId,
},
FailedToFetchNodeProvider {
validator: T::AccountId,
},
}

/// Consensus Errors
Expand Down Expand Up @@ -358,6 +361,7 @@ pub mod pallet {
era_id: DdcEra,
},
FailedToFetchCurrentValidator,
FailedToFetchNodeProvider,
}

#[pallet::error]
Expand Down Expand Up @@ -489,8 +493,6 @@ pub mod pallet {
pub(crate) struct NodeActivity {
/// Node id.
pub(crate) node_id: String,
/// Provider id.
pub(crate) provider_id: String,
/// Total amount of stored bytes.
pub(crate) stored_bytes: u64,
/// Total amount of transferred bytes.
Expand Down Expand Up @@ -1684,9 +1686,10 @@ pub mod pallet {
payees: nodes_activity_batched[i]
.iter()
.map(|activity| {
let provider_id =
T::AccountId::decode(&mut &activity.provider_id.as_bytes()[..])
.unwrap();
let node_id = activity.clone().node_id;

let provider_id = Self::fetch_provider_id(node_id).unwrap(); // todo! remove unwrap

let node_usage = NodeUsage {
transferred_bytes: activity.transferred_bytes,
stored_bytes: activity.stored_bytes,
Expand Down Expand Up @@ -1830,6 +1833,36 @@ pub mod pallet {
}
}

// todo! (3) add tests
/// Persists the SCALE-encoded `provider_id` for `node_id` in the node's
/// local (persistent) offchain storage, keyed by the node id.
pub(crate) fn store_provider_id<A: Encode>(node_id: String, provider_id: A) {
    let storage_key =
        format!("offchain::activities::provider_id::{:?}", node_id).into_bytes();
    // Store the serialized data in local offchain storage.
    sp_io::offchain::local_storage_set(
        StorageKind::PERSISTENT,
        &storage_key,
        &provider_id.encode(),
    );
}

/// Reads back the provider id previously persisted for `node_id` by
/// `store_provider_id`, decoding it from local offchain storage.
///
/// Returns `None` when no value is stored for this node id or when the
/// stored bytes fail to decode as `A` (the decode error is logged).
pub(crate) fn fetch_provider_id<A: Decode>(node_id: String) -> Option<A> {
    // Key must match the one used by `store_provider_id`.
    let key = format!("offchain::activities::provider_id::{:?}", node_id).into_bytes();
    // `?` short-circuits with `None` when nothing was stored under this key.
    let encoded = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key)?;

    match A::decode(&mut encoded.as_slice()) {
        Ok(provider_id) => Some(provider_id),
        Err(err) => {
            // Print error message with details of the decoding error
            log::error!("🦀Decoding error while fetching provider id: {:?}", err);
            None
        },
    }
}
/// Converts a vector of activity batches into their corresponding Merkle roots.
///
/// This function takes a vector of activity batches, where each batch is a vector of
Expand Down Expand Up @@ -2300,6 +2333,11 @@ pub mod pallet {
Ok(dac_nodes)
}

/// Resolves the provider (owner) account of the node identified by
/// `node_pub_key` via the configured `NodeVisitor`.
///
/// Any lookup failure is mapped to `OCWError::FailedToFetchNodeProvider`
/// so the offchain worker can report it uniformly.
fn get_node_provider_id(node_pub_key: &NodePubKey) -> Result<T::AccountId, OCWError> {
    T::NodeVisitor::get_node_provider_id(node_pub_key)
        .map_err(|_| OCWError::FailedToFetchNodeProvider)
}

/// Fetch node usage of an era.
///
/// Parameters:
Expand All @@ -2324,6 +2362,10 @@ pub mod pallet {
node_pub_key: node_pub_key.clone(),
}
})?;
for node_activity in usage.clone() {
let provider_id = Self::get_node_provider_id(node_pub_key).unwrap();
Self::store_provider_id(node_activity.node_id, provider_id);
}

node_usages.push((node_pub_key.clone(), usage));
}
Expand Down Expand Up @@ -2677,6 +2719,11 @@ pub mod pallet {
validator: caller.clone(),
});
},
OCWError::FailedToFetchNodeProvider => {
Self::deposit_event(Event::FailedToFetchNodeProvider {
validator: caller.clone(),
});
},
}
}

Expand Down
7 changes: 5 additions & 2 deletions pallets/ddc-verification/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use frame_support::{
};
use frame_system::mocking::MockBlock;
use pallet_staking::BalanceOf;
use sp_core::H256;
use sp_core::{ByteArray, H256};
use sp_runtime::{
curve::PiecewiseLinear,
testing::{TestXt, UintAuthorityId},
Expand Down Expand Up @@ -558,7 +558,10 @@ impl<T: Config> NodeVisitor<T> for MockNodeVisitor {
}

fn get_node_provider_id(_node_pub_key: &NodePubKey) -> Result<T::AccountId, DispatchError> {
    // Deterministic mock: a fixed account built from 32 bytes of 0x0a,
    // round-tripped through SCALE so it matches the runtime's `T::AccountId`.
    let raw = AccountId::from([0xa; 32]);
    Ok(T::AccountId::decode(&mut raw.as_slice()).unwrap())
}
}

Expand Down
26 changes: 0 additions & 26 deletions pallets/ddc-verification/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,39 +43,34 @@ fn get_validators() -> Vec<AccountId32> {
fn get_node_activities() -> Vec<NodeActivity> {
let node1 = NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
number_of_gets: 20,
};
let node2 = NodeActivity {
node_id: "1".to_string(),
provider_id: "1".to_string(),
stored_bytes: 101,
transferred_bytes: 51,
number_of_puts: 11,
number_of_gets: 21,
};
let node3 = NodeActivity {
node_id: "2".to_string(),
provider_id: "2".to_string(),
stored_bytes: 102,
transferred_bytes: 52,
number_of_puts: 12,
number_of_gets: 22,
};
let node4 = NodeActivity {
node_id: "3".to_string(),
provider_id: "3".to_string(),
stored_bytes: 103,
transferred_bytes: 53,
number_of_puts: 13,
number_of_gets: 23,
};
let node5 = NodeActivity {
node_id: "4".to_string(),
provider_id: "4".to_string(),
stored_bytes: 104,
transferred_bytes: 54,
number_of_puts: 14,
Expand Down Expand Up @@ -103,15 +98,13 @@ fn fetch_node_usage_works() {

// Create a sample NodeActivity instance
let node_activity1 = NodeActivity {
provider_id: "1".to_string(),
node_id: "1".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
number_of_gets: 20,
};
let node_activity2 = NodeActivity {
provider_id: "2".to_string(),
node_id: "2".to_string(),
stored_bytes: 110,
transferred_bytes: 510,
Expand Down Expand Up @@ -538,7 +531,6 @@ fn test_get_consensus_nodes_activity_success() {
(
node_pubkey_0,
vec![NodeActivity {
provider_id: "0".to_string(),
node_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
Expand All @@ -549,7 +541,6 @@ fn test_get_consensus_nodes_activity_success() {
(
node_pubkey_1,
vec![NodeActivity {
provider_id: "0".to_string(),
node_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
Expand All @@ -560,7 +551,6 @@ fn test_get_consensus_nodes_activity_success() {
(
node_pubkey_2,
vec![NodeActivity {
provider_id: "0".to_string(),
node_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
Expand Down Expand Up @@ -679,7 +669,6 @@ fn test_get_consensus_nodes_activity_not_enough_nodes() {
(
node_pubkey_0,
vec![NodeActivity {
provider_id: "0".to_string(),
node_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
Expand All @@ -690,7 +679,6 @@ fn test_get_consensus_nodes_activity_not_enough_nodes() {
(
node_pubkey_1,
vec![NodeActivity {
provider_id: "0".to_string(),
node_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
Expand Down Expand Up @@ -1008,7 +996,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus() {
node_pubkey_0,
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
Expand All @@ -1019,7 +1006,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus() {
node_pubkey_1,
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 200,
transferred_bytes: 100,
number_of_puts: 20,
Expand All @@ -1030,7 +1016,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus() {
node_pubkey_2,
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 300,
transferred_bytes: 150,
number_of_puts: 30,
Expand Down Expand Up @@ -1184,7 +1169,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_0.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
Expand All @@ -1195,7 +1179,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_1.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 200,
transferred_bytes: 100,
number_of_puts: 20,
Expand All @@ -1206,7 +1189,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_2.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 300,
transferred_bytes: 150,
number_of_puts: 30,
Expand All @@ -1217,7 +1199,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_0,
vec![NodeActivity {
node_id: "1".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
Expand All @@ -1228,7 +1209,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_1,
vec![NodeActivity {
node_id: "1".to_string(),
provider_id: "0".to_string(),
stored_bytes: 200,
transferred_bytes: 100,
number_of_puts: 20,
Expand All @@ -1239,7 +1219,6 @@ fn test_get_consensus_nodes_activity_not_in_consensus2() {
node_pubkey_2,
vec![NodeActivity {
node_id: "1".to_string(),
provider_id: "0".to_string(),
stored_bytes: 300,
transferred_bytes: 150,
number_of_puts: 30,
Expand Down Expand Up @@ -1292,7 +1271,6 @@ fn test_get_consensus_nodes_activity_diff_errors() {
node_pubkey_0.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
Expand All @@ -1303,7 +1281,6 @@ fn test_get_consensus_nodes_activity_diff_errors() {
node_pubkey_1.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 200,
transferred_bytes: 100,
number_of_puts: 20,
Expand All @@ -1314,7 +1291,6 @@ fn test_get_consensus_nodes_activity_diff_errors() {
node_pubkey_2.clone(),
vec![NodeActivity {
node_id: "0".to_string(),
provider_id: "0".to_string(),
stored_bytes: 300,
transferred_bytes: 150,
number_of_puts: 30,
Expand All @@ -1325,7 +1301,6 @@ fn test_get_consensus_nodes_activity_diff_errors() {
node_pubkey_0,
vec![NodeActivity {
node_id: "1".to_string(),
provider_id: "0".to_string(),
stored_bytes: 100,
transferred_bytes: 50,
number_of_puts: 10,
Expand All @@ -1336,7 +1311,6 @@ fn test_get_consensus_nodes_activity_diff_errors() {
node_pubkey_1,
vec![NodeActivity {
node_id: "1".to_string(),
provider_id: "0".to_string(),
stored_bytes: 200,
transferred_bytes: 100,
number_of_puts: 20,
Expand Down
8 changes: 1 addition & 7 deletions runtime/cere-dev/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1401,13 +1401,7 @@ pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, SignedExtra>;

/// Runtime migrations
type Migrations = (
pallet_ddc_clusters::migrations::v2::MigrateToV2<Runtime>,
pallet_ddc_staking::migrations::v1::MigrateToV1<Runtime>,
pallet_ddc_customers::migration::MigrateToV2<Runtime>,
pallet_ddc_customers::migration::MigrateToV1<Runtime>,
migrations::Unreleased,
);
type Migrations = (pallet_ddc_customers::migration::MigrateToV2<Runtime>, migrations::Unreleased);

/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
Expand Down
1 change: 0 additions & 1 deletion runtime/cere/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1413,7 +1413,6 @@ type Migrations = (
pallet_ddc_clusters::migrations::v2::MigrateToV2<Runtime>,
pallet_ddc_staking::migrations::v1::MigrateToV1<Runtime>,
pallet_ddc_customers::migration::MigrateToV2<Runtime>,
pallet_ddc_customers::migration::MigrateToV1<Runtime>,
migrations::Unreleased,
);

Expand Down

0 comments on commit ee9c426

Please sign in to comment.