Commit 9a3b1811 authored by Chris Elder

[FAB-7779] CouchDB indexes for private data



Any CouchDB indexes defined for a chaincode are currently deployed in
the chaincode state database.  Indexes for collections of private
data should instead be applied in the chaincode's collection state
database rather than in the chaincode state database.

Added support for the HandleChaincodeDeploy event to common_storage_db,
which now processes the event on chaincode deploy.  The move was necessary
for handling the private data namespace mapping.

Added a new function to statecouchdb for processing index creation.

Added a new IndexCapable interface to statedb.  This allows the
common_storage_db to detect that the state database will accept index
creation events.

Updated index validation to allow for indexes under collections directories.

Change-Id: Icfda923c584d953ce5886023f7411bbddb59d595
Signed-off-by: Chris Elder <chris.elder@us.ibm.com>
parent 7400cc17
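For reference, a minimal sketch of the two metadata layouts this change recognizes and of how the
reworked validator is now invoked. The paths mirror the tests in this change; the file name
indexOwner.json, the collection name collectionMarbles, and the indexBytes variable are illustrative only.

// index destined for the chaincode's state database
err := ccmetadata.ValidateMetadataFile(
	"META-INF/statedb/couchdb/indexes/indexOwner.json", indexBytes)
// index destined for the state database of the private data collection "collectionMarbles"
err = ccmetadata.ValidateMetadataFile(
	"META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexOwner.json", indexBytes)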
......@@ -17,6 +17,8 @@ limitations under the License.
package testutil
import (
"archive/tar"
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
......@@ -169,3 +171,34 @@ func getCallerInfo() string {
}
return fmt.Sprintf("CallerInfo = [%s:%d]", file, line)
}
// TarFileEntry is a structure for adding test index files to a tar
type TarFileEntry struct {
Name, Body string
}
// CreateTarBytesForTest creates a tar byte array for unit testing
func CreateTarBytesForTest(testFiles []*TarFileEntry) []byte {
//Create a buffer for the tar file
buffer := new(bytes.Buffer)
tarWriter := tar.NewWriter(buffer)
for _, file := range testFiles {
tarHeader := &tar.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
err := tarWriter.WriteHeader(tarHeader)
if err != nil {
return nil
}
_, err = tarWriter.Write([]byte(file.Body))
if err != nil {
return nil
}
}
// Close the tar writer to flush the archive; the error is intentionally ignored in this test helper.
tarWriter.Close()
return buffer.Bytes()
}
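A minimal usage sketch of this helper, assuming an illustrative index file name and body:

tarBytes := testutil.CreateTarBytesForTest([]*testutil.TarFileEntry{
	{Name: "META-INF/statedb/couchdb/indexes/indexOwner.json",
		Body: `{"index":{"fields":["owner"]},"name":"indexOwner","type":"json"}`},
})
// tarBytes now holds a tar archive containing the single index file and can be fed to
// code that consumes statedb artifact archives, such as ExtractFileEntries below.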
......@@ -11,17 +11,27 @@ import (
"fmt"
"path/filepath"
"reflect"
"regexp"
"strings"
)
// fileValidators are used as handlers to validate specific metadata directories
type fileValidator func(fileName string, fileBytes []byte) error
const allowedCharsCollectionName = "[A-Za-z0-9_-]+"
// Currently, the only metadata expected and allowed are CouchDB index definitions located under
// META-INF/statedb/couchdb/indexes and META-INF/statedb/couchdb/collections/<collection>/indexes.
var fileValidators = map[string]fileValidator{
"META-INF/statedb/couchdb/indexes": couchdbIndexFileValidator,
var fileValidators = map[*regexp.Regexp]fileValidator{
regexp.MustCompile("^META-INF/statedb/couchdb/indexes/.*[.]json"): couchdbIndexFileValidator,
regexp.MustCompile("^META-INF/statedb/couchdb/collections/" + allowedCharsCollectionName + "/indexes/.*[.]json"): couchdbIndexFileValidator,
}
var collectionNameValid = regexp.MustCompile("^" + allowedCharsCollectionName)
var fileNameValid = regexp.MustCompile("^.*[.]json")
var validDatabases = []string{"couchdb"}
// UnhandledDirectoryError is returned for metadata files in unhandled directories
type UnhandledDirectoryError struct {
err string
......@@ -31,15 +41,6 @@ func (e *UnhandledDirectoryError) Error() string {
return e.err
}
// BadExtensionError is returned for metadata files with extension other than .json
type BadExtensionError struct {
err string
}
func (e *BadExtensionError) Error() string {
return e.err
}
// InvalidIndexContentError is returned for metadata files with invalid content
type InvalidIndexContentError struct {
err string
......@@ -50,18 +51,18 @@ func (e *InvalidIndexContentError) Error() string {
}
// ValidateMetadataFile checks that metadata files are valid
// according to the validation rules of the metadata directory (metadataType)
func ValidateMetadataFile(fileName string, fileBytes []byte, metadataType string) error {
// according to the validation rules of the file's directory
func ValidateMetadataFile(filePathName string, fileBytes []byte) error {
// Get the validator handler for the metadata directory
fileValidator, ok := fileValidators[metadataType]
fileValidator := selectFileValidator(filePathName)
// If there is no validator handler for metadata directory, return UnhandledDirectoryError
if !ok {
return &UnhandledDirectoryError{fmt.Sprintf("Metadata not supported in directory: %s", metadataType)}
if fileValidator == nil {
return &UnhandledDirectoryError{buildMetadataFileErrorMessage(filePathName)}
}
// If the file is not valid for the given metadata directory, return the corresponding error
err := fileValidator(fileName, fileBytes)
// If the file is not valid for the given directory-based validator, return the corresponding error
err := fileValidator(filePathName, fileBytes)
if err != nil {
return err
}
......@@ -70,16 +71,70 @@ func ValidateMetadataFile(fileName string, fileBytes []byte, metadataType string
return nil
}
// couchdbIndexFileValidator implements fileValidator
func couchdbIndexFileValidator(fileName string, fileBytes []byte) error {
func buildMetadataFileErrorMessage(filePathName string) string {
ext := filepath.Ext(fileName)
dir, filename := filepath.Split(filePathName)
// if the file does not have a .json extension, then return as error
if ext != ".json" {
return &BadExtensionError{fmt.Sprintf("Index metadata file [%s] does not have a .json extension", fileName)}
if !strings.HasPrefix(filePathName, "META-INF/statedb") {
return fmt.Sprintf("metadata file path must begin with META-INF/statedb, found: %s", dir)
}
directoryArray := strings.Split(filepath.Clean(dir), "/")
// verify the minimum directory depth
if len(directoryArray) < 4 {
return fmt.Sprintf("metadata file path must include a database and index directory: %s", dir)
}
// validate the database type
if !contains(validDatabases, directoryArray[2]) {
return fmt.Sprintf("database name [%s] is not supported, valid options: %s", directoryArray[2], validDatabases)
}
// verify "indexes" is under the database name
if len(directoryArray) == 4 && directoryArray[3] != "indexes" {
return fmt.Sprintf("metadata file path does not have an indexes directory: %s", dir)
}
// if this is for collections, check the path length
if len(directoryArray) != 6 {
return fmt.Sprintf("metadata file path for collections must include a collections and index directory: %s", dir)
}
// verify "indexes" is under the collections and collection directories
if directoryArray[3] != "collections" || directoryArray[5] != "indexes" {
return fmt.Sprintf("metadata file path for collections must have a collections and indexes directory: %s", dir)
}
// validate the collection name
if !collectionNameValid.MatchString(directoryArray[4]) {
return fmt.Sprintf("collection name is not valid: %s", directoryArray[4])
}
// validate the file name
if !fileNameValid.MatchString(filename) {
return fmt.Sprintf("artifact file name is not valid: %s", filename)
}
return fmt.Sprintf("metadata file path or name is not supported: %s", dir)
}
func contains(validStrings []string, target string) bool {
for _, str := range validStrings {
if str == target {
return true
}
}
return false
}
func selectFileValidator(filePathName string) fileValidator {
for validateExp, fileValidator := range fileValidators {
isValid := validateExp.MatchString(filePathName)
if isValid {
return fileValidator
}
}
return nil
}
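To illustrate how the regexp-keyed map drives validator selection (example paths only; the first
two match the couchdbIndexFileValidator entries, while the last matches nothing, so
ValidateMetadataFile returns an UnhandledDirectoryError):

v := selectFileValidator("META-INF/statedb/couchdb/indexes/indexOwner.json")            // couchdbIndexFileValidator
v = selectFileValidator("META-INF/statedb/couchdb/collections/coll_1/indexes/idx.json") // couchdbIndexFileValidator
v = selectFileValidator("META-INF/statedb/couchdb/indexes/readme.txt")                  // nil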
// couchdbIndexFileValidator implements fileValidator
func couchdbIndexFileValidator(fileName string, fileBytes []byte) error {
// if the content does not validate as JSON, return err to invalidate the file
boolIsJSON, indexDefinition := isJSON(fileBytes)
if !boolIsJSON {
......
......@@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0
package ccmetadata
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
......@@ -22,11 +23,10 @@ func TestGoodIndexJSON(t *testing.T) {
cleanupDir(testDir)
defer cleanupDir(testDir)
fileName := "myIndex.json"
fileName := "META-INF/statedb/couchdb/indexes/myIndex.json"
fileBytes := []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
metadataType := "META-INF/statedb/couchdb/indexes"
err := ValidateMetadataFile(fileName, fileBytes, metadataType)
err := ValidateMetadataFile(fileName, fileBytes)
assert.NoError(t, err, "Error validating a good index")
}
......@@ -35,11 +35,10 @@ func TestBadIndexJSON(t *testing.T) {
cleanupDir(testDir)
defer cleanupDir(testDir)
fileName := "myIndex.json"
fileName := "META-INF/statedb/couchdb/indexes/myIndex.json"
fileBytes := []byte("invalid json")
metadataType := "META-INF/statedb/couchdb/indexes"
err := ValidateMetadataFile(fileName, fileBytes, metadataType)
err := ValidateMetadataFile(fileName, fileBytes)
assert.Error(t, err, "Should have received an InvalidIndexContentError")
......@@ -55,12 +54,10 @@ func TestIndexWrongLocation(t *testing.T) {
cleanupDir(testDir)
defer cleanupDir(testDir)
fileName := "myIndex.json"
fileName := "META-INF/statedb/couchdb/myIndex.json"
fileBytes := []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
// place the index one directory too high
metadataType := "META-INF/statedb/couchdb"
err := ValidateMetadataFile(fileName, fileBytes, metadataType)
err := ValidateMetadataFile(fileName, fileBytes)
assert.Error(t, err, "Should have received an UnhandledDirectoryError")
// Type assertion on UnhandledDirectoryError
......@@ -77,9 +74,8 @@ func TestInvalidMetadataType(t *testing.T) {
fileName := "myIndex.json"
fileBytes := []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
metadataType := "Invalid metadata type"
err := ValidateMetadataFile(fileName, fileBytes, metadataType)
err := ValidateMetadataFile(fileName, fileBytes)
assert.Error(t, err, "Should have received an UnhandledDirectoryError")
// Type assertion on UnhandledDirectoryError
......@@ -94,14 +90,80 @@ func TestBadMetadataExtension(t *testing.T) {
fileName := "myIndex.go"
fileBytes := []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
metadataType := "META-INF/statedb/couchdb/indexes"
err := ValidateMetadataFile(fileName, fileBytes, metadataType)
assert.Error(t, err, "Should have received an BadExtensionError")
err := ValidateMetadataFile(fileName, fileBytes)
assert.Error(t, err, "Should have received an error")
}
func TestBadFilePaths(t *testing.T) {
testDir := filepath.Join(packageTestDir, "BadFilePaths")
cleanupDir(testDir)
defer cleanupDir(testDir)
// Test bad META-INF
fileName := "META-INF1/statedb/couchdb/indexes/test1.json"
fileBytes := []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err := ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for bad META-INF directory")
// Test bad path length
fileName = "META-INF/statedb/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for bad length")
// Test invalid database name
fileName = "META-INF/statedb/goleveldb/indexes/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for invalid database")
// Test invalid indexes directory name
fileName = "META-INF/statedb/couchdb/index/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for invalid indexes directory")
// Test invalid collections directory name
fileName = "META-INF/statedb/couchdb/collection/testcoll/indexes/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for invalid collections directory")
// Test valid collections name
fileName = "META-INF/statedb/couchdb/collections/testcoll/indexes/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.NoError(t, err, "Error should not have been thrown for a valid collection name")
// Test invalid collections name
fileName = "META-INF/statedb/couchdb/collections/#testcoll/indexes/test1.json"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for an invalid collection name")
// Test invalid file name (extension other than .json)
fileName = "META-INF/statedb/couchdb/collections/testcoll/indexes/test1.txt"
fileBytes = []byte(`{"index":{"fields":["data.docType","data.owner"]},"name":"indexOwner","type":"json"}`)
// Type assertion on BadExtensionError
_, ok := err.(*BadExtensionError)
assert.True(t, ok, "Should have received an BadExtensionError")
err = ValidateMetadataFile(fileName, fileBytes)
fmt.Println(err)
assert.Error(t, err, "Should have received an error for an invalid file name")
}
......
......@@ -442,7 +442,7 @@ func (goPlatform *Platform) GetDeploymentPayload(spec *pb.ChaincodeSpec) ([]byte
}
// Split the tar location (file.Name) into a tar package directory and filename
packageDir, filename := filepath.Split(file.Name)
_, filename := filepath.Split(file.Name)
// Hidden files are not supported as metadata, therefore ignore them.
// User often doesn't know that hidden files are there, and may not be able to delete them, therefore warn user rather than error out.
......@@ -457,9 +457,8 @@ func (goPlatform *Platform) GetDeploymentPayload(spec *pb.ChaincodeSpec) ([]byte
}
// Validate metadata file for inclusion in tar
// Validation is based on the passed metadata directory, e.g. META-INF/statedb/couchdb/indexes
// Clean metadata directory to remove trailing slash
err = ccmetadata.ValidateMetadataFile(filename, fileBytes, filepath.Clean(packageDir))
// Validation is based on the passed filename with path
err = ccmetadata.ValidateMetadataFile(file.Name, fileBytes)
if err != nil {
return nil, err
}
......
......@@ -13,6 +13,7 @@ import (
"io"
"io/ioutil"
"path/filepath"
"strings"
"github.com/hyperledger/fabric/core/chaincode/platforms"
)
......@@ -59,15 +60,19 @@ func ExtractStatedbArtifactsFromCCPackage(ccpackage CCPackage) (statedbArtifacts
}
// ExtractFileEntries extract file entries from the given `tarBytes`. A file entry is included in the
// returned results only if it is located in the dir specified in the `filterDirs` parameter
func ExtractFileEntries(tarBytes []byte, filterDirs map[string]bool) ([]*TarFileEntry, error) {
var fileEntries []*TarFileEntry
//initialize a tar reader
// returned results only if it is located in a directory under the indicated databaseType directory
// Example for chaincode indexes:
// "META-INF/statedb/couchdb/indexes/indexColorSortName.json"
// Example for collection scoped indexes:
// "META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexCollMarbles.json"
// Passing an empty string for databaseType returns all statedb metadata. This is useful for
// validating an archive that may in the future contain multiple database types
func ExtractFileEntries(tarBytes []byte, databaseType string) (map[string][]*TarFileEntry, error) {
indexArtifacts := map[string][]*TarFileEntry{}
tarReader := tar.NewReader(bytes.NewReader(tarBytes))
for {
//read the next header from the tar
tarHeader, err := tarReader.Next()
//if the EOF is detected, then exit
hdr, err := tarReader.Next()
if err == io.EOF {
// end of tar archive
break
......@@ -75,17 +80,17 @@ func ExtractFileEntries(tarBytes []byte, filterDirs map[string]bool) ([]*TarFile
if err != nil {
return nil, err
}
ccproviderLogger.Debugf("Processing entry from tar: %s", tarHeader.Name)
//Ensure that this is a file located in the dir present in the 'filterDirs'
if !tarHeader.FileInfo().IsDir() && filterDirs[filepath.Dir(tarHeader.Name)] {
ccproviderLogger.Debugf("Selecting file entry from tar: %s", tarHeader.Name)
//read the tar entry into a byte array
//split the directory from the full name
dir, _ := filepath.Split(hdr.Name)
//remove the ending slash
if strings.HasPrefix(hdr.Name, "META-INF/statedb/"+databaseType) {
fileContent, err := ioutil.ReadAll(tarReader)
if err != nil {
return nil, err
}
fileEntries = append(fileEntries, &TarFileEntry{tarHeader, fileContent})
indexArtifacts[filepath.Clean(dir)] = append(indexArtifacts[filepath.Clean(dir)], &TarFileEntry{FileHeader: hdr, FileContent: fileContent})
}
}
return fileEntries, nil
return indexArtifacts, nil
}
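A sketch of how a consumer might use the reworked extraction; dbArtifactsTarBytes is assumed to be
an archive like the one built in the commonstoragedb test below, and the map keys are the cleaned
directory paths inside the tar:

indexArtifacts, err := ccprovider.ExtractFileEntries(dbArtifactsTarBytes, "couchdb")
if err != nil {
	return err
}
// indexArtifacts["META-INF/statedb/couchdb/indexes"] holds the chaincode-level index files,
// while indexArtifacts["META-INF/statedb/couchdb/collections/collectionMarbles/indexes"]
// holds the index files scoped to that collection.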
......@@ -94,7 +94,7 @@ func WriteFolderToTarPackage(tw *tar.Writer, srcPath string, excludeDir string,
packagepath = localpath[rootDirLen+1:]
// Split the tar packagepath into a tar package directory and filename
packageDir, filename := filepath.Split(packagepath)
_, filename := filepath.Split(packagepath)
// Hidden files are not supported as metadata, therefore ignore them.
// User often doesn't know that hidden files are there, and may not be able to delete them, therefore warn user rather than error out.
......@@ -103,15 +103,14 @@ func WriteFolderToTarPackage(tw *tar.Writer, srcPath string, excludeDir string,
return nil
}
fileBytes, err := ioutil.ReadFile(localpath)
if err != nil {
return err
fileBytes, errRead := ioutil.ReadFile(localpath)
if errRead != nil {
return errRead
}
// Validate metadata file for inclusion in tar
// Validation is based on the passed metadata directory, e.g. META-INF/statedb/couchdb/indexes
// Clean metadata directory to remove trailing slash
err = ccmetadata.ValidateMetadataFile(filename, fileBytes, filepath.Clean(packageDir))
// Validation is based on the fully qualified path of the file
err = ccmetadata.ValidateMetadataFile(packagepath, fileBytes)
if err != nil {
return err
}
......
......@@ -216,7 +216,7 @@ func Test_WriteFolderToTarPackageFailure2(t *testing.T) {
err := WriteFolderToTarPackage(tw, srcPath, "", nil, nil)
assert.Error(t, err, "Should have received error writing folder to package")
assert.Contains(t, err.Error(), "file [bad.json] is not a valid JSON")
assert.Contains(t, err.Error(), "Index metadata file [META-INF/statedb/couchdb/indexes/bad.json] is not a valid JSON")
tw.Close()
gw.Close()
......@@ -231,7 +231,7 @@ func Test_WriteFolderToTarPackageFailure3(t *testing.T) {
err := WriteFolderToTarPackage(tw, srcPath, "", nil, nil)
assert.Error(t, err, "Should have received error writing folder to package")
assert.Contains(t, err.Error(), "Metadata not supported in directory: META-INF")
assert.Contains(t, err.Error(), "metadata file path must begin with META-INF/statedb")
tw.Close()
gw.Close()
......
......@@ -9,7 +9,10 @@ package privacyenabledstate
import (
"encoding/base64"
"fmt"
"strings"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
......@@ -19,6 +22,8 @@ import (
"github.com/hyperledger/fabric/core/ledger/ledgerconfig"
)
var logger = flogging.MustGetLogger("privacyenabledstate")
const (
nsJoiner = "$$"
pvtDataPrefix = "p"
......@@ -118,9 +123,9 @@ func (s *CommonStorageDB) ClearCachedVersions() {
// GetChaincodeEventListener implements corresponding function in interface DB
func (s *CommonStorageDB) GetChaincodeEventListener() cceventmgmt.ChaincodeLifecycleEventListener {
ccListener, ok := s.VersionedDB.(cceventmgmt.ChaincodeLifecycleEventListener)
_, ok := s.VersionedDB.(statedb.IndexCapable)
if ok {
return ccListener
return s
}
return nil
}
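The statedb.IndexCapable interface asserted here (and used again in HandleChaincodeDeploy below) is
declared outside this diff. Based solely on the calls made in this file, it presumably looks roughly
like the following sketch; the error return on ProcessIndexesForChaincodeDeploy is an assumption,
since its result is not checked here:

// IndexCapable marks a VersionedDB implementation (e.g. CouchDB) that accepts
// index creation events at chaincode deployment time.
type IndexCapable interface {
	GetDBType() string
	ProcessIndexesForChaincodeDeploy(namespace string, fileEntries []*ccprovider.TarFileEntry) error
}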
......@@ -190,6 +195,48 @@ func (s *CommonStorageDB) ApplyPrivacyAwareUpdates(updates *UpdateBatch, height
return s.VersionedDB.ApplyUpdates(updates.PubUpdates.UpdateBatch, height)
}
//HandleChaincodeDeploy initializes database artifacts for the database associated with the namespace
func (s *CommonStorageDB) HandleChaincodeDeploy(chaincodeDefinition *cceventmgmt.ChaincodeDefinition, dbArtifactsTar []byte) error {
//Check to see if the interface for IndexCapable is implemented
indexCapable, ok := s.VersionedDB.(statedb.IndexCapable)
if !ok {
return nil
}
if chaincodeDefinition == nil {
return fmt.Errorf("chaincode definition not found while creating couchdb index on chain")
}
dbArtifacts, err := ccprovider.ExtractFileEntries(dbArtifactsTar, indexCapable.GetDBType())
if err != nil {
logger.Errorf("error while extracting db artifacts from tar for chaincode=[%s]. error=%s",
chaincodeDefinition.Name, err)
return nil
}
for directoryPath, archiveDirectoryEntries := range dbArtifacts {
// split the directory name
directoryPathArray := strings.Split(directoryPath, "/")
// process the indexes for the chain
if directoryPathArray[3] == "indexes" {
indexCapable.ProcessIndexesForChaincodeDeploy(chaincodeDefinition.Name, archiveDirectoryEntries)
continue
}
// check for the indexes directory for the collection
if directoryPathArray[3] == "collections" && directoryPathArray[5] == "indexes" {
collectionName := directoryPathArray[4]
indexCapable.ProcessIndexesForChaincodeDeploy(derivePvtDataNs(chaincodeDefinition.Name, collectionName),
archiveDirectoryEntries)
}
}
return nil
}
// ChaincodeDeployDone is a noop for couchdb state impl
func (s *CommonStorageDB) ChaincodeDeployDone(succeeded bool) {
// NOOP
}
func derivePvtDataNs(namespace, collection string) string {
return namespace + nsJoiner + pvtDataPrefix + collection
}
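To make the directory indexing in HandleChaincodeDeploy concrete: a collection index directory such
as "META-INF/statedb/couchdb/collections/collectionMarbles/indexes" splits on "/" into six elements,
so element 3 selects the collections branch, element 4 is the collection name, and element 5 must be
"indexes". The derived private data namespace for an illustrative chaincode "ns1" is then:

// "ns1" + nsJoiner + pvtDataPrefix + "collectionMarbles" => "ns1$$pcollectionMarbles"
ns := derivePvtDataNs("ns1", "collectionMarbles")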
......
......@@ -13,6 +13,8 @@ import (
"testing"
"github.com/hyperledger/fabric/common/ledger/testutil"
"github.com/hyperledger/fabric/core/common/ccprovider"
"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
"github.com/hyperledger/fabric/core/ledger/util"
......@@ -407,6 +409,91 @@ func TestCompositeKeyMap(t *testing.T) {
testutil.AssertEquals(t, ok, false)
}
func TestHandleChainCodeDeployOnCouchDB(t *testing.T) {
for _, env := range testEnvs {
_, ok := env.(*CouchDBCommonStorageTestEnv)
if !ok {
continue
}
t.Run(env.GetName(), func(t *testing.T) {
testHandleChainCodeDeploy(t, env)
})
}
}
func testHandleChainCodeDeploy(t *testing.T, env TestEnv) {
env.Init(t)
defer env.Cleanup()
db := env.GetDBHandle("test-handle-chaincode-deploy")
chaincodeDef := &cceventmgmt.ChaincodeDefinition{Name: "ns1", Hash: nil, Version: ""}
commonStorageDB := db.(*CommonStorageDB)
// Test indexes for side databases
dbArtifactsTarBytes := testutil.CreateTarBytesForTest(
[]*testutil.TarFileEntry{
{"META-INF/statedb/couchdb/indexes/indexColorSortName.json", `{"index":{"fields":[{"color":"desc"}]},"ddoc":"indexColorSortName","name":"indexColorSortName","type":"json"}`},
{"META-INF/statedb/couchdb/indexes/indexSizeSortName.json", `{"index":{"fields":[{"size":"desc"}]},"ddoc":"indexSizeSortName","name":"indexSizeSortName","type":"json"}`},
{"META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexCollMarbles.json", `{"index":{"fields":["docType","owner"]},"ddoc":"indexCollectionMarbles", "name":"indexCollectionMarbles","type":"json"}`},
{"META-INF/statedb/couchdb/collections/collectionMarblesPrivateDetails/indexes/indexCollPrivDetails.json", `{"index":{"fields":["docType","price"]},"ddoc":"indexPrivateDetails", "name":"indexPrivateDetails","type":"json"}`},
},
)
// Test the retrieveIndexArtifacts method