diff --git a/translib/app_utils_test.go b/translib/app_utils_test.go index 7ffd7ae0b8f4..4f4d5ea63183 100644 --- a/translib/app_utils_test.go +++ b/translib/app_utils_test.go @@ -152,7 +152,6 @@ func processDeleteRequest(url string) func(*testing.T) { func getConfigDb() *db.DB { configDb, _ := db.NewDB(db.Options{ DBNo: db.ConfigDB, - InitIndicator: "CONFIG_DB_INITIALIZED", TableNameSeparator: "|", KeySeparator: "|", }) diff --git a/translib/db/db.go b/translib/db/db.go index 431a2ff01096..03467faa2250 100644 --- a/translib/db/db.go +++ b/translib/db/db.go @@ -111,6 +111,7 @@ import ( // "reflect" "errors" "strings" + "time" "github.com/Azure/sonic-mgmt-common/cvl" "github.com/go-redis/redis/v7" @@ -122,6 +123,8 @@ const ( DefaultRedisUNIXSocket string = "/var/run/redis/redis.sock" DefaultRedisLocalTCPEP string = "localhost:6379" DefaultRedisRemoteTCPEP string = "127.0.0.1:6379" + DefaultRedisUNIXNetwork string = "unix" + DefaultRedisTCPNetwork string = "tcp" ) func init() { @@ -155,6 +158,7 @@ type Options struct { TableNameSeparator string //Overriden by the DB config file's separator. KeySeparator string //Overriden by the DB config file's separator. IsWriteDisabled bool //Indicated if write is allowed + IsCacheEnabled bool //Is cache (Per Connection) allowed? IsOnChangeEnabled bool // whether OnChange cache enabled DisableCVLCheck bool @@ -194,7 +198,12 @@ func (s _txState) String() string { } const ( - InitialTxPipelineSize int = 100 + InitialTxPipelineSize int = 100 + InitialTablesCount int = 20 + InitialTableEntryCount int = 50 + InitialTablePatternCount int = 5 + InitialMapsCount int = 10 + InitialMapKeyCount int = 50 ) // TableSpec gives the name of the table, and other per-table customizations. @@ -211,50 +220,14 @@ type TableSpec struct { NoDelete bool } -func (v Value) String() string { - var str string - for k, v1 := range v.Field { - str = str + fmt.Sprintf("\"%s\": \"%s\"\n", k, v1) - } - - return str -} - -// Value gives the fields as a map. 
-// (Eg: { Field: map[string]string { "type" : "l3v6", "ports" : "eth0" } } ). -type Value struct { - Field map[string]string -} - -// Table gives the entire table a a map. -// (Eg: { ts: &TableSpec{ Name: "ACL_TABLE" }, -// entry: map[string]Value { -// "ACL_TABLE|acl1|rule1_1": Value { -// Field: map[string]string { -// "type" : "l3v6", "ports" : "Ethernet0", -// } -// }, -// "ACL_TABLE|acl1|rule1_2": Value { -// Field: map[string]string { -// "type" : "l3v6", "ports" : "eth0", -// } -// }, -// } -// }) - -type Table struct { - ts *TableSpec - entry map[string]Value - db *DB -} - -type dbCache struct { - Tables map[string]Table -} - const ( ConnectionClosed = tlerr.TranslibDBInvalidState("connection closed") OnChangeDisabled = tlerr.TranslibDBInvalidState("OnChange disabled") + SupportsReadOnly = tlerr.TranslibDBInvalidState("Supported on read only") + + OnChangeNoSupport = tlerr.TranslibDBInvalidState("OnChange not supported") + SupportsCfgDBOnly = tlerr.TranslibDBInvalidState("Supported on CfgDB only") + UseGetEntry = tlerr.TranslibDBInvalidState("Use GetEntry()") ) type _txOp int @@ -278,13 +251,21 @@ type DB struct { client *redis.Client Opts *Options - txState _txState - txCmds []_txCmd + txState _txState + txCmds []_txCmd + txTsEntryMap map[string]map[string]Value //map[TableSpec.Name]map[Entry]Value + cv *cvl.CVL cvlEditConfigData []cvl.CVLEditConfigData onCReg dbOnChangeReg // holds OnChange enabled table names - cache dbCache // holds OnChange cache + // dbCache is used by both PerConnection cache, and OnChange cache + // On a DB handle, the two are mutually exclusive. + cache dbCache + stats DBStats + + dbStatsConfig DBStatsConfig + dbCacheConfig DBCacheConfig /* sKeys []*SKey // Subscribe Key array @@ -346,61 +327,26 @@ func GetdbNameToIndex(dbName string) DBNum { // NewDB is the factory method to create new DB's. 
func NewDB(opt Options) (*DB, error) { - var d DB var e error if glog.V(3) { glog.Info("NewDB: Begin: opt: ", opt) } - ipAddr := DefaultRedisLocalTCPEP - dbId := int(opt.DBNo) - if dbInstName := getDBInstName(opt.DBNo); dbInstName != "" { - if isDbInstPresent(dbInstName) { - ipAddr = getDbTcpAddr(dbInstName) - dbId = getDbId(dbInstName) - dbSepStr := getDbSeparator(dbInstName) - if len(dbSepStr) > 0 { - if len(opt.TableNameSeparator) > 0 && opt.TableNameSeparator != dbSepStr { - glog.Warning(fmt.Sprintf("TableNameSeparator '%v' in the Options is different from the" + - " one configured in the Db config. file for the Db name %v", opt.TableNameSeparator, dbInstName)) - } - opt.KeySeparator = dbSepStr - opt.TableNameSeparator = dbSepStr - } else { - glog.Warning("Database Separator not present for the Db name: ", dbInstName) - } - } else { - glog.Warning("Database instance not present for the Db name: ", dbInstName) - } - } else { - glog.Errorf("NewDB: invalid database number: %d", dbId) - e = tlerr.TranslibDBCannotOpen{} - goto NewDBExit - } + // Time Start + var now time.Time + var dur time.Duration + now = time.Now() - if opt.IsOnChangeEnabled && !opt.IsWriteDisabled { - glog.Errorf("NewDB: IsEnableOnChange cannot be set on write enabled DB") - e = tlerr.TranslibDBCannotOpen{} - goto NewDBExit - } - - d = DB{client: redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: ipAddr, - //Addr: DefaultRedisRemoteTCPEP, - Password: "", /* TBD */ - // DB: int(4), /* CONFIG_DB DB No. */ - DB: dbId, - DialTimeout: 0, - // For Transactions, limit the pool - PoolSize: 1, - // Each DB gets it own (single) connection. 
- }), + d := DB{client: redis.NewClient(adjustRedisOpts(&opt)), Opts: &opt, txState: txStateNone, txCmds: make([]_txCmd, 0, InitialTxPipelineSize), cvlEditConfigData: make([]cvl.CVLEditConfigData, 0, InitialTxPipelineSize), + dbStatsConfig: getDBStatsConfig(), + stats: DBStats{Tables: make(map[string]Stats, InitialTablesCount), Maps: make(map[string]Stats, InitialMapsCount)}, + dbCacheConfig: getDBCacheConfig(), + cache: dbCache{Tables: make(map[string]Table, InitialTablesCount), Maps: make(map[string]MAP, InitialMapsCount)}, } if d.client == nil { @@ -409,10 +355,36 @@ func NewDB(opt Options) (*DB, error) { goto NewDBExit } + if opt.IsOnChangeEnabled && !opt.IsWriteDisabled { + glog.Errorf("NewDB: IsEnableOnChange cannot be set on write enabled DB") + e = tlerr.TranslibDBCannotOpen{} + goto NewDBExit + } + if opt.IsOnChangeEnabled { d.onCReg = dbOnChangeReg{CacheTables: make(map[string]bool)} } + if opt.IsCacheEnabled && opt.IsOnChangeEnabled { + glog.Error("Per Connection cache cannot be enabled with OnChange cache") + glog.Error("Disabling Per Connection caching") + opt.IsCacheEnabled = false + } + + if !d.Opts.IsWriteDisabled { + if d.dbCacheConfig.PerConnection { + glog.Info("NewDB: IsWriteDisabled false. Disable Cache") + } + d.dbCacheConfig.PerConnection = false + } + + if !d.Opts.IsCacheEnabled { + if d.dbCacheConfig.PerConnection { + glog.Info("NewDB: IsCacheEnabled false. Disable Cache") + } + d.dbCacheConfig.PerConnection = false + } + if opt.DBNo != ConfigDB { if glog.V(3) { glog.Info("NewDB: ! ConfigDB. Skip init. 
check.") @@ -422,19 +394,38 @@ func NewDB(opt Options) (*DB, error) { if len(d.Opts.InitIndicator) == 0 { - glog.V(5).Info("NewDB: Init indication not requested") + if glog.V(5) { + glog.Info("NewDB: Init indication not requested") + } + + } else { - } else if init, _ := d.client.Get(d.Opts.InitIndicator).Int(); init != 1 { + glog.V(3).Info("NewDB: RedisCmd: ", d.Name(), ": ", "GET ", + d.Opts.InitIndicator) + if init, err := d.client.Get(d.Opts.InitIndicator).Int(); init != 1 { - glog.Error("NewDB: Database not inited") - e = tlerr.TranslibDBNotInit{} - goto NewDBExit + glog.Error("NewDB: Database not inited: ", d.Name(), ": GET ", + d.Opts.InitIndicator) + if err != nil { + glog.Error("NewDB: Database not inited: ", d.Name(), ": GET ", + d.Opts.InitIndicator, " returns err: ", err) + } + d.client.Close() + e = tlerr.TranslibDBNotInit{} + goto NewDBExit + } } NewDBSkipInitIndicatorCheck: NewDBExit: + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + } + + dbGlobalStats.updateStats(d.Opts.DBNo, true, dur, &(d.stats)) + if glog.V(3) { glog.Info("NewDB: End: d: ", d, " e: ", e) } @@ -444,19 +435,23 @@ NewDBExit: // DeleteDB is the gentle way to close the DB connection. 
func (d *DB) DeleteDB() error { - if d == nil { - return nil + if !d.IsOpen() { + return ConnectionClosed } if glog.V(3) { glog.Info("DeleteDB: Begin: d: ", d) } + dbGlobalStats.updateStats(d.Opts.DBNo, false, 0, &(d.stats)) + if d.txState != txStateNone { glog.Warning("DeleteDB: not txStateNone, txState: ", d.txState) } - return d.client.Close() + err := d.client.Close() + d.client = nil + return err } func (d *DB) Name() string { @@ -495,6 +490,16 @@ func (d *DB) redis2key(ts *TableSpec, redisKey string) Key { } +// redis2ts_key works only if keys don't contain the (Table|Key)Separator char +// (The TableSpec does not have the CompCt) +func (d *DB) redis2ts_key(redisKey string) (TableSpec, Key) { + + splitTable := strings.SplitN(redisKey, d.Opts.TableNameSeparator, 2) + + return TableSpec{Name: splitTable[0]}, + Key{strings.Split(splitTable[1], d.Opts.KeySeparator)} +} + func (d *DB) ts2redisUpdated(ts *TableSpec) string { if glog.V(5) { @@ -526,28 +531,58 @@ func (d *DB) getEntry(ts *TableSpec, key Key, forceReadDB bool) (Value, error) { glog.Info("GetEntry: Begin: ", "ts: ", ts, " key: ", key) } - var value Value + // GetEntryHits + // Time Start var cacheHit bool + var txCacheHit bool + var now time.Time + var dur time.Duration + var stats Stats + if d.dbStatsConfig.TimeStats { + now = time.Now() + } + + var table Table + var value Value var e error + var v map[string]string + var ok bool entry := d.key2redis(ts, key) - useCache := d.Opts.IsOnChangeEnabled && d.onCReg.isCacheTable(ts.Name) - - if !forceReadDB && useCache { - if table, ok := d.cache.Tables[ts.Name]; ok { - if value, ok = table.entry[entry]; ok { - value = value.Copy() - cacheHit = true + useCache := ((d.Opts.IsOnChangeEnabled && d.onCReg.isCacheTable(ts.Name)) || + (d.dbCacheConfig.PerConnection && + d.dbCacheConfig.isCacheTable(ts.Name))) + + // check in Tx cache first + if value, ok = d.txTsEntryMap[ts.Name][entry]; !ok { + // If cache GetFromCache (CacheHit?) 
+ if (useCache && !forceReadDB) { + if table, ok = d.cache.Tables[ts.Name]; ok { + if value, ok = table.entry[entry]; ok { + value = value.Copy() + cacheHit = true + } } } + } else { + value = value.Copy() + txCacheHit = true } - if !cacheHit { - value.Field, e = d.client.HGetAll(d.key2redis(ts, key)).Result() + if !cacheHit && !txCacheHit { + // Increase (i.e. more verbose) V() level if it gets too noisy. + if glog.V(3) { + glog.Info("getEntry: RedisCmd: ", d.Name(), ": ", "HGETALL ", entry) + } + v, e = d.client.HGetAll(entry).Result() + value = Value{Field: v} } if e != nil { - glog.V(2).Infof("GetEntry: %s: HGetAll(%q) error: %v", d.Name(), entry, e) + if glog.V(2) { + glog.Errorf("GetEntry: %s: HGetAll(%q) error: %v", d.Name(), + entry, e) + } value = Value{} } else if !value.IsPopulated() { @@ -557,7 +592,7 @@ func (d *DB) getEntry(ts *TableSpec, key Key, forceReadDB bool) (Value, error) { // e = errors.New("Entry does not exist") e = tlerr.TranslibRedisClientEntryNotExist{Entry: d.key2redis(ts, key)} - } else if !cacheHit && useCache { + } else if !cacheHit && !txCacheHit && useCache { if _, ok := d.cache.Tables[ts.Name]; !ok { if d.cache.Tables == nil { d.cache.Tables = make(map[string]Table, d.onCReg.size()) @@ -565,12 +600,47 @@ func (d *DB) getEntry(ts *TableSpec, key Key, forceReadDB bool) (Value, error) { d.cache.Tables[ts.Name] = Table{ ts: ts, entry: make(map[string]Value), + complete: false, + patterns: make(map[string][]Key), db: d, } } d.cache.Tables[ts.Name].entry[entry] = value.Copy() } + // Time End, Time, Peak + if d.dbStatsConfig.TableStats { + stats = d.stats.Tables[ts.Name] + } else { + stats = d.stats.AllTables + } + + stats.Hits++ + stats.GetEntryHits++ + if cacheHit { + stats.GetEntryCacheHits++ + } + + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + + if dur > stats.Peak { + stats.Peak = dur + } + stats.Time += dur + + if dur > stats.GetEntryPeak { + stats.GetEntryPeak = dur + } + stats.GetEntryTime += dur + } + + if 
d.dbStatsConfig.TableStats { + d.stats.Tables[ts.Name] = stats + } else { + d.stats.AllTables = stats + } + if glog.V(3) { glog.Info("GetEntry: End: ", "value: ", value, " e: ", e) } @@ -580,66 +650,199 @@ func (d *DB) getEntry(ts *TableSpec, key Key, forceReadDB bool) (Value, error) { // GetKeys retrieves all entry/row keys. func (d *DB) GetKeys(ts *TableSpec) ([]Key, error) { - return d.GetKeysPattern(ts, Key{Comp: []string{"*"}}); + // If ts contains (Key|TableName)Separator (Eg: "|"), translate this to + // a GetKeysPattern, by extracting the initial Key Comps from TableName + // Slice into the t(able) (a)N(d) k(ey)Pat(tern) if any + if tNkPat := strings.SplitN(ts.Name, d.Opts.TableNameSeparator, + 2); len(tNkPat) == 2 { + + tsNk := &TableSpec{Name: tNkPat[0], CompCt: ts.CompCt} + pat := Key{Comp: append(strings.Split(tNkPat[1], d.Opts.KeySeparator), + "*")} + glog.Warningf("GetKeys: Separator in TableSpec %v is Deprecated. "+ + "Using TableSpec.Name %s, Pattern %v", ts, tsNk.Name, pat) + return d.GetKeysPattern(tsNk, pat) + } + + return d.GetKeysPattern(ts, Key{Comp: []string{"*"}}) } func (d *DB) GetKeysPattern(ts *TableSpec, pat Key) ([]Key, error) { + // GetKeysHits + // Time Start + var cacheHit bool + var now time.Time + var dur time.Duration + var stats Stats + var table Table + var keys []Key + var e error + if glog.V(3) { glog.Info("GetKeys: Begin: ", "ts: ", ts, "pat: ", pat) } - redisKeys, e := d.client.Keys(d.key2redis(ts,pat)).Result() - if glog.V(4) { - glog.Info("GetKeys: redisKeys: ", redisKeys, " e: ", e) + if !d.IsOpen() { + return keys, ConnectionClosed } - keys := make([]Key, 0, len(redisKeys)) - for i := 0; i < len(redisKeys); i++ { - keys = append(keys, d.redis2key(ts, redisKeys[i])) + defer func() { + if e != nil { + glog.Error("GetKeys: ts: ", ts, " e: ", e) + } + if glog.V(3) { + glog.Info("GetKeys: End: ", "keys: ", keys, " e: ", e) + } + }() + + if d.dbStatsConfig.TimeStats { + now = time.Now() } - if glog.V(3) { - glog.Info("GetKeys: 
End: ", "keys: ", keys, " e: ", e) + // If pseudoDB then set it up separately. TBD + + // If cache GetFromCache (CacheHit?) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheTable(ts.Name) { + var ok bool + if table, ok = d.cache.Tables[ts.Name]; ok { + if keys, ok = table.patterns[d.key2redis(ts, pat)]; ok { + cacheHit = true + } + } } - return keys, e -} + if !cacheHit { + // Increase (i.e. more verbose) V() level if it gets too noisy. + if glog.V(3) { + glog.Info("GetKeysPattern: RedisCmd: ", d.Name(), ": ", "KEYS ", d.key2redis(ts, pat)) + } + var redisKeys []string + redisKeys, e = d.client.Keys(d.key2redis(ts, pat)).Result() -// GetKeysByPattern retrieves all entry/row keysi matching -// with the given pattern. -func (d *DB) GetKeysByPattern(ts *TableSpec, pattern string) ([]Key, error) { + keys = make([]Key, 0, len(redisKeys)) + for i := 0; i < len(redisKeys); i++ { + keys = append(keys, d.redis2key(ts, redisKeys[i])) + } - if glog.V(3) { - glog.Info("GetKeysByPattern: Begin: ", "ts: ", ts) + if e != nil { + return keys, e + } + + // If cache SetCache (i.e. a cache miss) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheTable(ts.Name) { + if _, ok := d.cache.Tables[ts.Name]; !ok { + d.cache.Tables[ts.Name] = Table{ + ts: ts, + entry: make(map[string]Value, InitialTableEntryCount), + complete: false, + patterns: make(map[string][]Key, InitialTablePatternCount), + db: d, + } + } + // Make a copy for the Per Connection cache which is always + // *before* adjusting with Redis CAS Tx Cache. 
+ keysCopy := make([]Key, len(keys)) + for i, key := range keys { + keysCopy[i] = key.Copy() + } + d.cache.Tables[ts.Name].patterns[d.key2redis(ts, pat)] = keysCopy + } } - redisKeys, e := d.client.Keys(d.key2redis(ts, - Key{Comp: []string{pattern}})).Result() - if glog.V(4) { - glog.Info("GetKeysByPattern: redisKeys: ", redisKeys, " e: ", e) + for k := range d.txTsEntryMap[ts.Name] { + if patternMatch(k, 0, d.key2redis(ts, pat), 0) { + var present bool + var index int + key := d.redis2key(ts, k) + for i := 0; i < len(keys); i++ { + index = i + if key.Equals(keys[i]) { + present = true + break + } + } + if !present { + if len(d.txTsEntryMap[ts.Name][k].Field) > 0 { + keys = append(keys, key) + } + } else { + if len(d.txTsEntryMap[ts.Name][k].Field) == 0 { + keys = append(keys[:index], keys[index+1:]...) + } + } + } } - keys := make([]Key, 0, len(redisKeys)) - for i := 0; i < len(redisKeys); i++ { - keys = append(keys, d.redis2key(ts, redisKeys[i])) + // Time End, Time, Peak + if d.dbStatsConfig.TableStats { + stats = d.stats.Tables[ts.Name] + } else { + stats = d.stats.AllTables } - if glog.V(3) { - glog.Info("GetKeysByPattern: End: ", "keys: ", keys, " e: ", e) + stats.Hits++ + stats.GetKeysPatternHits++ + if cacheHit { + stats.GetKeysPatternCacheHits++ + } + if (len(pat.Comp) == 1) && (pat.Comp[0] == "*") { + stats.GetKeysHits++ + if cacheHit { + stats.GetKeysCacheHits++ + } + } + + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + + if dur > stats.Peak { + stats.Peak = dur + } + stats.Time += dur + + if dur > stats.GetKeysPatternPeak { + stats.GetKeysPatternPeak = dur + } + stats.GetKeysPatternTime += dur + + if (len(pat.Comp) == 1) && (pat.Comp[0] == "*") { + + if dur > stats.GetKeysPeak { + stats.GetKeysPeak = dur + } + stats.GetKeysTime += dur + } + } + + if d.dbStatsConfig.TableStats { + d.stats.Tables[ts.Name] = stats + } else { + d.stats.AllTables = stats } return keys, e } +// GetKeysByPattern retrieves all entry/row keys matching with the given 
pattern +// Deprecated: use GetKeysPattern() +func (d *DB) GetKeysByPattern(ts *TableSpec, pattern string) ([]Key, error) { + glog.Warning("GetKeysByPattern() is deprecated and it will be removed in the future, please use GetKeysPattern()") + return d.GetKeysPattern(ts, Key{Comp: []string{pattern}}) +} + // DeleteKeys deletes all entry/row keys matching a pattern. func (d *DB) DeleteKeys(ts *TableSpec, key Key) error { if glog.V(3) { glog.Info("DeleteKeys: Begin: ", "ts: ", ts, " key: ", key) } + if !d.IsOpen() { + return ConnectionClosed + } + // This can be done via a LUA script as well. For now do this. TBD - redisKeys, e := d.client.Keys(d.key2redis(ts, key)).Result() + redisKeys, e := d.GetKeysPattern(ts, key) if glog.V(4) { glog.Info("DeleteKeys: redisKeys: ", redisKeys, " e: ", e) } @@ -648,10 +851,10 @@ func (d *DB) DeleteKeys(ts *TableSpec, key Key) error { if glog.V(4) { glog.Info("DeleteKeys: Deleting redisKey: ", redisKeys[i]) } - e = d.DeleteEntry(ts, d.redis2key(ts, redisKeys[i])) + e = d.DeleteEntry(ts, redisKeys[i]) if e != nil { glog.Warning("DeleteKeys: Deleting: ts: ", ts, " key", - d.redis2key(ts, redisKeys[i]), " : ", e) + redisKeys[i], " : ", e) } } @@ -693,20 +896,20 @@ func (d *DB) doCVL(ts *TableSpec, cvlOps []cvl.CVLOperation, key Key, vals []Val switch cvlOps[i] { case cvl.OP_CREATE, cvl.OP_UPDATE: - cvlEditConfigData.Data = vals[i].Field + cvlEditConfigData.Data = vals[i].Copy().Field d.cvlEditConfigData = append(d.cvlEditConfigData, cvlEditConfigData) case cvl.OP_DELETE: if len(vals[i].Field) == 0 { cvlEditConfigData.Data = map[string]string{} } else { - cvlEditConfigData.Data = vals[i].Field + cvlEditConfigData.Data = vals[i].Copy().Field } d.cvlEditConfigData = append(d.cvlEditConfigData, cvlEditConfigData) default: glog.Error("doCVL: Unknown, op: ", cvlOps[i]) - e = fmt.Errorf("Unknown Op: %d", cvlOps[i]) + e = errors.New("Unknown Op: " + string(rune(cvlOps[i]))) } } @@ -743,10 +946,12 @@ doCVLExit: return e } -func (d *DB) 
doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { +func (d *DB) doWrite(ts *TableSpec, op _txOp, k Key, val interface{}) error { var e error = nil var value Value + key := k.Copy() + if d.Opts.IsWriteDisabled { glog.Error("doWrite: Write to DB disabled") e = errors.New("Write to DB disabled during this operation") @@ -755,7 +960,9 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { switch d.txState { case txStateNone: - glog.Info("doWrite: No Transaction.") + if glog.V(3) { + glog.Info("doWrite: No Transaction.") + } case txStateWatch: if glog.V(2) { glog.Info("doWrite: Change to txStateSet, txState: ", d.txState) @@ -770,7 +977,7 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { e = errors.New("Cannot issue {Set|Mod|Delete}Entry in txStateMultiExec") default: glog.Error("doWrite: Unknown, txState: ", d.txState) - e = fmt.Errorf("Unknown State: %d", d.txState) + e = errors.New("Unknown State: " + string(rune(d.txState))) } if e != nil { @@ -780,6 +987,8 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { // No Transaction case. No CVL. 
if d.txState == txStateNone { + glog.Info("doWrite: RedisCmd: ", d.Name(), ": ", getOperationName(op), " ", d.key2redis(ts, key), " ", getTableValuesInString(op, val)) + switch op { case txOpHMSet: @@ -792,7 +1001,8 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { e = d.client.HMSet(d.key2redis(ts, key), vintf).Err() if e != nil { - glog.Error("doWrite: HMSet: ", key, " : ", value, " e: ", e) + glog.Error("doWrite: ", d.Name(), ": HMSet: ", key, " : ", + value, " e: ", e) } case txOpHDel: @@ -803,18 +1013,19 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { e = d.client.HDel(d.key2redis(ts, key), fields...).Err() if e != nil { - glog.Error("doWrite: HDel: ", key, " : ", fields, " e: ", e) + glog.Error("doWrite: ", d.Name(), ": HDel: ", key, " : ", + fields, " e: ", e) } case txOpDel: e = d.client.Del(d.key2redis(ts, key)).Err() if e != nil { - glog.Error("doWrite: Del: ", key, " : ", e) + glog.Error("doWrite: ", d.Name(), ": Del: ", key, " : ", e) } default: glog.Error("doWrite: Unknown, op: ", op) - e = fmt.Errorf("Unknown Op: %d", op) + e = errors.New("Unknown Op: " + string(rune(op))) } goto doWriteExit @@ -824,15 +1035,44 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { glog.Info("doWrite: op: ", op, " ", d.key2redis(ts, key), " : ", value) + if _, ok := d.txTsEntryMap[ts.Name]; !ok { + d.txTsEntryMap[ts.Name] = make(map[string]Value) + } + switch op { case txOpHMSet, txOpHDel: - value = val.(Value) + value = val.(Value).Copy() + entry := d.key2redis(ts, key) + if _, ok := d.txTsEntryMap[ts.Name][entry]; !ok { + var v map[string]string + glog.Info("doWrite: RedisCmd: ", d.Name(), ": ", "HGETALL ", d.key2redis(ts, key)) + v, e = d.client.HGetAll(d.key2redis(ts, key)).Result() + if len(v) != 0 { + d.txTsEntryMap[ts.Name][entry] = Value{Field: v} + } else { + d.txTsEntryMap[ts.Name][entry] = Value{Field: make(map[string]string)} + } + } + if op == txOpHMSet { + for 
k := range value.Field { + d.txTsEntryMap[ts.Name][entry].Field[k] = value.Field[k] + } + } else { + if _, ok := d.txTsEntryMap[ts.Name][entry]; ok { + for k := range value.Field { + delete(d.txTsEntryMap[ts.Name][entry].Field, k) + } + } + } case txOpDel: + entry := d.key2redis(ts, key) + delete(d.txTsEntryMap[ts.Name], entry) + d.txTsEntryMap[ts.Name][entry] = Value{Field: make(map[string]string)} default: glog.Error("doWrite: Unknown, op: ", op) - e = fmt.Errorf("Unknown Op: %d", op) + e = errors.New("Unknown Op: " + string(rune(op))) } if e != nil { @@ -845,6 +1085,7 @@ func (d *DB) doWrite(ts *TableSpec, op _txOp, key Key, val interface{}) error { key: &key, value: &value, }) + d.stats.AllTables.TxCmdsLen = uint(len(d.txCmds)) doWriteExit: @@ -931,20 +1172,41 @@ setEntryExit: // CreateEntry creates an entry(row) in the table. func (d *DB) CreateEntry(ts *TableSpec, key Key, value Value) error { + if !d.IsOpen() { + return ConnectionClosed + } + return d.setEntry(ts, key, value, true) } // SetEntry sets an entry(row) in the table. func (d *DB) SetEntry(ts *TableSpec, key Key, value Value) error { + if !d.IsOpen() { + return ConnectionClosed + } + return d.setEntry(ts, key, value, false) } func (d *DB) Publish(channel string, message interface{}) error { + if !d.IsOpen() { + return ConnectionClosed + } + e := d.client.Publish(channel, message).Err() return e } func (d *DB) RunScript(script *redis.Script, keys []string, args ...interface{}) *redis.Cmd { + if !d.IsOpen() { + return nil + } + + if d.Opts.DBNo == ConfigDB { + glog.Info("RunScript: Not supported for ConfigDB") + return nil + } + return script.Run(d.client, keys, args...) 
} @@ -956,6 +1218,10 @@ func (d *DB) DeleteEntry(ts *TableSpec, key Key) error { glog.Info("DeleteEntry: Begin: ", "ts: ", ts, " key: ", key) } + if !d.IsOpen() { + return ConnectionClosed + } + if glog.V(3) { glog.Info("DeleteEntry: DoCVL for DELETE") } @@ -978,11 +1244,19 @@ func (d *DB) ModEntry(ts *TableSpec, key Key, value Value) error { " value: ", value) } + if !d.IsOpen() { + return ConnectionClosed + } + if len(value.Field) == 0 { if ts.NoDelete { - glog.Info("ModEntry: NoDelete flag is true, skipping deletion of the entry.") + if glog.V(3) { + glog.Info("ModEntry: NoDelete flag is true, skipping deletion of the entry.") + } } else { - glog.Info("ModEntry: Mapping to DeleteEntry()") + if glog.V(3) { + glog.Info("ModEntry: Mapping to DeleteEntry()") + } e = d.DeleteEntry(ts, key) } goto ModEntryExit @@ -1010,6 +1284,10 @@ func (d *DB) DeleteEntryFields(ts *TableSpec, key Key, value Value) error { " value: ", value) } + if !d.IsOpen() { + return ConnectionClosed + } + if glog.V(3) { glog.Info("DeleteEntryFields: DoCVL for HDEL (post-POC)") } @@ -1021,81 +1299,22 @@ func (d *DB) DeleteEntryFields(ts *TableSpec, key Key, value Value) error { e := d.doCVL(ts, []cvl.CVLOperation{cvl.OP_DELETE}, key, []Value{value}) if e == nil { - d.doWrite(ts, txOpHDel, key, value) + e = d.doWrite(ts, txOpHDel, key, value) } return e } -// GetTable gets the entire table. -func (d *DB) GetTable(ts *TableSpec) (Table, error) { - if glog.V(3) { - glog.Info("GetTable: Begin: ts: ", ts) - } - - /* - table := Table{ - ts: ts, - entry: map[string]Value{ - "table1|k0.0|k0.1": Value{ - map[string]string{ - "f0.0": "v0.0", - "f0.1": "v0.1", - "f0.2": "v0.2", - }, - }, - "table1|k1.0|k1.1": Value{ - map[string]string{ - "f1.0": "v1.0", - "f1.1": "v1.1", - "f1.2": "v1.2", - }, - }, - }, - db: d, - } - */ - - // Create Table - table := Table{ - ts: ts, - entry: make(map[string]Value), - db: d, - } - - // This can be done via a LUA script as well. For now do this. 
TBD - // Read Keys - keys, e := d.GetKeys(ts) - if e != nil { - glog.Error("GetTable: GetKeys: " + e.Error()) - goto GetTableExit - } - - // For each key in Keys - // Add Value into table.entry[key)] - for i := 0; i < len(keys); i++ { - value, e := d.GetEntry(ts, keys[i]) - if e != nil { - glog.Warning("GetTable: GetKeys: " + e.Error()) - continue - } - table.entry[d.key2redis(ts, keys[i])] = value - } - -GetTableExit: - - if glog.V(3) { - glog.Info("GetTable: End: table: ", table) - } - return table, e -} - // DeleteTable deletes the entire table. func (d *DB) DeleteTable(ts *TableSpec) error { if glog.V(3) { glog.Info("DeleteTable: Begin: ts: ", ts) } + if !d.IsOpen() { + return ConnectionClosed + } + // This can be done via a LUA script as well. For now do this. TBD // Read Keys keys, e := d.GetKeys(ts) @@ -1107,7 +1326,7 @@ func (d *DB) DeleteTable(ts *TableSpec) error { // For each key in Keys // Delete the entry for i := 0; i < len(keys); i++ { - // Don't define/declare a nested scope ``e'' + // Don't define/declare a nested scope ``e'' e = d.DeleteEntry(ts, keys[i]) if e != nil { glog.Warning("DeleteTable: DeleteEntry: " + e.Error()) @@ -1121,129 +1340,6 @@ DeleteTableExit: return e } -// GetKeys method retrieves all entry/row keys from a previously read table. -func (t *Table) GetKeys() ([]Key, error) { - if glog.V(3) { - glog.Info("Table.GetKeys: Begin: t: ", t) - } - keys := make([]Key, 0, len(t.entry)) - for k := range t.entry { - keys = append(keys, t.db.redis2key(t.ts, k)) - } - - if glog.V(3) { - glog.Info("Table.GetKeys: End: keys: ", keys) - } - return keys, nil -} - -// GetEntry method retrieves an entry/row from a previously read table. 
-func (t *Table) GetEntry(key Key) (Value, error) { - /* - return Value{map[string]string{ - "f0.0": "v0.0", - "f0.1": "v0.1", - "f0.2": "v0.2", - }, - }, nil - */ - if glog.V(3) { - glog.Info("Table.GetEntry: Begin: t: ", t, " key: ", key) - } - v := t.entry[t.db.key2redis(t.ts, key)] - if glog.V(3) { - glog.Info("Table.GetEntry: End: entry: ", v) - } - return v, nil -} - -//===== Functions for db.Value ===== - -func (v Value) Copy() (rV Value) { - rV = Value{Field: make(map[string]string, len(v.Field))} - for k, v1 := range v.Field { - rV.Field[k] = v1 - } - return -} - -func (v *Value) IsPopulated() bool { - return len(v.Field) > 0 -} - -// Has function checks if a field exists. -func (v *Value) Has(name string) bool { - _, flag := v.Field[name] - return flag -} - -// Get returns the value of a field. Returns empty string if the field -// does not exists. Use Has() function to check existance of field. -func (v *Value) Get(name string) string { - return v.Field[name] -} - -// Set function sets a string value for a field. -func (v *Value) Set(name, value string) { - v.Field[name] = value -} - -// GetInt returns value of a field as int. Returns 0 if the field does -// not exists. Returns an error if the field value is not a number. -func (v *Value) GetInt(name string) (int, error) { - data, ok := v.Field[name] - if ok { - return strconv.Atoi(data) - } - return 0, nil -} - -// SetInt sets an integer value for a field. -func (v *Value) SetInt(name string, value int) { - v.Set(name, strconv.Itoa(value)) -} - -// GetList returns the value of a an array field. A "@" suffix is -// automatically appended to the field name if not present (as per -// swsssdk convention). Field value is split by comma and resulting -// slice is returned. Empty slice is returned if field not exists. 
-func (v *Value) GetList(name string) []string { - var data string - if strings.HasSuffix(name, "@") { - data = v.Get(name) - } else { - data = v.Get(name + "@") - } - - if len(data) == 0 { - return []string{} - } - - return strings.Split(data, ",") -} - -// SetList function sets an list value to a field. Field name and -// value are formatted as per swsssdk conventions: -// - A "@" suffix is appended to key name -// - Field value is the comma separated string of list items -func (v *Value) SetList(name string, items []string) { - if !strings.HasSuffix(name, "@") { - name += "@" - } - - if len(items) != 0 { - data := strings.Join(items, ",") - v.Set(name, data) - } else { - v.Remove(name) - } -} - -// Remove function removes a field from this Value. -func (v *Value) Remove(name string) { - delete(v.Field, name) -} - ////////////////////////////////////////////////////////////////////////// // The Transaction API for translib infra ////////////////////////////////////////////////////////////////////////// @@ -1280,6 +1376,8 @@ func (d *DB) StartTx(w []WatchKeys, tss []*TableSpec) error { glog.Info("StartTx: Begin: w: ", w, " tss: ", tss) } + d.txTsEntryMap = make(map[string]map[string]Value) + var e error = nil var ret cvl.CVLRetCode @@ -1332,6 +1430,7 @@ AppendWatchTxExit: func (d *DB) performWatch(w []WatchKeys, tss []*TableSpec) error { var e error + var first_e error var args []interface{} // For each watchkey @@ -1350,9 +1449,13 @@ func (d *DB) performWatch(w []WatchKeys, tss []*TableSpec) error { continue } + glog.Info("performWatch: RedisCmd: ", d.Name(), ": ", "KEYS ", redisKey) redisKeys, e := d.client.Keys(redisKey).Result() if e != nil { glog.Warning("performWatch: Keys: " + e.Error()) + if first_e == nil { + first_e = e + } continue } for j := 0; j < len(redisKeys); j++ { @@ -1372,10 +1475,14 @@ func (d *DB) performWatch(w []WatchKeys, tss []*TableSpec) error { } // Issue the WATCH + glog.Info("performWatch: Do: ", args) _, e = 
d.client.Do(args...).Result() if e != nil { glog.Warning("performWatch: Do: WATCH ", args, " e: ", e.Error()) + if first_e == nil { + first_e = e + } } SkipWatch: @@ -1383,7 +1490,7 @@ SkipWatch: // Switch State d.txState = txStateWatch - return e + return first_e } // CommitTx method is used by infra to commit a check-and-set Transaction. @@ -1411,7 +1518,7 @@ func (d *DB) CommitTx() error { e = errors.New("Cannot issue MULTI in txStateMultiExec") default: glog.Error("CommitTx: Unknown, txState: ", d.txState) - e = fmt.Errorf("Unknown State: %d", d.txState) + e = errors.New("Unknown State: " + string(rune(d.txState))) } if e != nil { @@ -1419,10 +1526,12 @@ func (d *DB) CommitTx() error { } // Issue MULTI + glog.Info("CommitTx: Do: MULTI") _, e = d.client.Do("MULTI").Result() if e != nil { glog.Warning("CommitTx: Do: MULTI e: ", e.Error()) + goto CommitTxExit } // For each cmd in txCmds @@ -1447,10 +1556,6 @@ func (d *DB) CommitTx() error { args = append(args, k, v) } - if glog.V(4) { - glog.Info("CommitTx: Do: ", args) - } - _, e = d.client.Do(args...).Result() case txOpHDel: @@ -1462,10 +1567,6 @@ func (d *DB) CommitTx() error { args = append(args, k) } - if glog.V(4) { - glog.Info("CommitTx: Do: ", args) - } - _, e = d.client.Do(args...).Result() case txOpDel: @@ -1473,39 +1574,56 @@ func (d *DB) CommitTx() error { args = make([]interface{}, 0, 2) args = append(args, "DEL", redisKey) - if glog.V(4) { - glog.Info("CommitTx: Do: ", args) - } - _, e = d.client.Do(args...).Result() default: glog.Error("CommitTx: Unknown, op: ", d.txCmds[i].op) - e = fmt.Errorf("Unknown Op: %d", d.txCmds[i].op) + e = errors.New("Unknown Op: " + string(rune(d.txCmds[i].op))) } + glog.Info("CommitTx: RedisCmd: ", d.Name(), ": ", args) + if e != nil { glog.Warning("CommitTx: Do: ", args, " e: ", e.Error()) + break } } + if e != nil { + goto CommitTxExit + } + // Flag the Tables as updated. 
for ts := range tsmap { + if glog.V(4) { + glog.Info("CommitTx: Do: SET ", d.ts2redisUpdated(&ts), " 1") + } _, e = d.client.Do("SET", d.ts2redisUpdated(&ts), "1").Result() if e != nil { glog.Warning("CommitTx: Do: SET ", d.ts2redisUpdated(&ts), " 1: e: ", e.Error()) + break } } + + if e != nil { + goto CommitTxExit + } + + if glog.V(4) { + glog.Info("CommitTx: Do: SET ", d.ts2redisUpdated(&TableSpec{Name: "*"}), " 1") + } _, e = d.client.Do("SET", d.ts2redisUpdated(&TableSpec{Name: "*"}), - "1").Result() + strconv.FormatInt(time.Now().UnixNano(), 10)).Result() if e != nil { glog.Warning("CommitTx: Do: SET ", "CONFIG_DB_UPDATED", " 1: e: ", e.Error()) + goto CommitTxExit } // Issue EXEC + glog.Info("CommitTx: Do: EXEC") _, e = d.client.Do("EXEC").Result() if e != nil { @@ -1513,18 +1631,22 @@ func (d *DB) CommitTx() error { e = tlerr.TranslibTransactionFail{} } +CommitTxExit: // Switch State, Clear Command list d.txState = txStateNone d.txCmds = d.txCmds[:0] d.cvlEditConfigData = d.cvlEditConfigData[:0] + d.txTsEntryMap = make(map[string]map[string]Value) //Close CVL session - if ret := cvl.ValidationSessClose(d.cv); ret != cvl.CVL_SUCCESS { - glog.Error("CommitTx: End: Error in closing CVL session") + if d.cv != nil { + if ret := cvl.ValidationSessClose(d.cv); ret != cvl.CVL_SUCCESS { + glog.Error("CommitTx: End: Error in closing CVL session: ret: ", + cvl.GetErrorString(ret)) + } + d.cv = nil } - d.cv = nil -CommitTxExit: if glog.V(3) { glog.Info("CommitTx: End: e: ", e) } @@ -1555,7 +1677,7 @@ func (d *DB) AbortTx() error { e = errors.New("Cannot issue UNWATCH in txStateMultiExec") default: glog.Error("AbortTx: Unknown, txState: ", d.txState) - e = fmt.Errorf("Unknown State: %d", d.txState) + e = errors.New("Unknown State: " + string(rune(d.txState))) } if e != nil { @@ -1563,26 +1685,286 @@ func (d *DB) AbortTx() error { } // Issue UNWATCH + glog.Info("AbortTx: Do: UNWATCH") _, e = d.client.Do("UNWATCH").Result() if e != nil { glog.Warning("AbortTx: Do: UNWATCH 
e: ", e.Error()) } +AbortTxExit: // Switch State, Clear Command list d.txState = txStateNone d.txCmds = d.txCmds[:0] d.cvlEditConfigData = d.cvlEditConfigData[:0] + d.txTsEntryMap = make(map[string]map[string]Value) //Close CVL session - if ret := cvl.ValidationSessClose(d.cv); ret != cvl.CVL_SUCCESS { - glog.Error("AbortTx: End: Error in closing CVL session") + if d.cv != nil { + if ret := cvl.ValidationSessClose(d.cv); ret != cvl.CVL_SUCCESS { + glog.Error("AbortTx: End: Error in closing CVL session: ret: ", + cvl.GetErrorString(ret)) + } + d.cv = nil } - d.cv = nil -AbortTxExit: if glog.V(3) { glog.Info("AbortTx: End: e: ", e) } return e } + +func getOperationName(op _txOp) string { + switch op { + case txOpNone: + return "No Operation" + case txOpHMSet: + return "HMSET" + case txOpHDel: + return "HDEL" + case txOpDel: + return "DEL" + } + return "" +} + +func getTableValuesInString(op _txOp, val interface{}) string { + var values string + + switch op { + + case txOpHMSet: + for k, v := range val.(Value).Field { + values += k + " " + v + " " + } + case txOpHDel: + for k := range val.(Value).Field { + values += k + " " + } + } + + return values +} + +// GetEntries retrieves the entries from the table for the given keys +// using redis pipelining, if the key is not present in the cache. +// returns slice of value and error; Note: error slice will be nil, +// if there is no error occurred for any of the given keys. 
+func (d *DB) GetEntries(ts *TableSpec, keys []Key) ([]Value, []error) { + if (d == nil) || (d.client == nil) { + values := make([]Value, len(keys)) + errors := make([]error, len(keys)) + for i := range errors { + errors[i] = ConnectionClosed + } + + return values, errors + } + + return d.getEntries(ts, keys, false) +} + +func (d *DB) getEntries(ts *TableSpec, keys []Key, forceReadDB bool) ([]Value, []error) { + + if glog.V(3) { + glog.Info("GetEntries: Begin: ", "ts: ", ts, " keys: ", keys) + } + + var now time.Time + if d.dbStatsConfig.TimeStats { + now = time.Now() + } + + var values = make([]Value, len(keys)) + var errors []error + + var dur time.Duration + var stats Stats + var cacheHit bool + var txCacheHit bool + var cacheChk bool + var tblExist bool + + var tbl Table + + if (d.dbCacheConfig.PerConnection && + d.dbCacheConfig.isCacheTable(ts.Name)) || + (d.Opts.IsOnChangeEnabled && d.onCReg.isCacheTable(ts.Name)) { + cacheChk = true + tbl, tblExist = d.cache.Tables[ts.Name] + } + + if d.dbStatsConfig.TableStats { + stats = d.stats.Tables[ts.Name] + } else { + stats = d.stats.AllTables + } + + // to keep the order of the input keys + var keyIdxs []int + var dbKeys []string + + for idx, key := range keys { + cacheHit = false + txCacheHit = false + entry := d.key2redis(ts, key) + + if valueTx, exist := d.txTsEntryMap[ts.Name][entry]; !exist { + if cacheChk && !forceReadDB { + if value, ok := tbl.entry[entry]; ok { + values[idx] = value.Copy() + cacheHit = true + } + } + } else { + values[idx] = valueTx.Copy() + txCacheHit = true + if len(valueTx.Field) == 0 { + keyErr := tlerr.TranslibRedisClientEntryNotExist{Entry: entry} + setError(keyErr, idx, &errors, len(keys)) + } + } + + if !cacheHit && !txCacheHit { + keyIdxs = append(keyIdxs, idx) + dbKeys = append(dbKeys, entry) + } + + if cacheHit { + stats.GetEntryCacheHits++ + } + } + + if len(dbKeys) > 0 { + // get the values for the keys using redis pipeline + entryList, err := d.getMultiEntry(ts, dbKeys) + if 
err != nil { + glog.Error("GetEntries: ", d.Name(), + ": error in getMultiEntry(", ts.Name, "): ", err.Error()) + if errors == nil { + errors = make([]error, len(keys)) + } + for i, dbKey := range dbKeys { + keyIdx := keyIdxs[i] + values[keyIdx] = Value{} + errors[keyIdx] = tlerr.TranslibRedisClientEntryNotExist{Entry: dbKey} + } + } else { + // iterate the keys to fill the value and error slice + for i, dbKey := range dbKeys { + keyIdx := keyIdxs[i] + v := entryList[i] + + if v == nil { + values[keyIdx] = Value{} + keyErr := tlerr.TranslibRedisClientEntryNotExist{Entry: dbKey} + setError(keyErr, keyIdx, &errors, len(keys)) + continue + } + + dbValue := Value{} + res, e := v.Result() + if e != nil { + values[keyIdx] = dbValue + setError(e, keyIdx, &errors, len(keys)) + glog.Warningf("GetEntries: %s: error %s; for the key %s", + d.Name(), e.Error(), dbKey) + } else { + dbValue.Field = res + values[keyIdx] = dbValue + } + + if len(dbValue.Field) != 0 { + if cacheChk { + if !tblExist { + d.cache.Tables[ts.Name] = Table{ + ts: ts, + entry: make(map[string]Value, InitialTableEntryCount), + complete: false, + patterns: make(map[string][]Key, InitialTablePatternCount), + db: d, + } + tblExist = true + } + d.cache.Tables[ts.Name].entry[dbKey] = dbValue.Copy() + } + } else if e == nil { + if glog.V(4) { + glog.Info("GetEntries: pipe.HGetAll(): empty map for the key: ", dbKey) + } + keyErr := tlerr.TranslibRedisClientEntryNotExist{Entry: dbKey} + setError(keyErr, keyIdx, &errors, len(keys)) + } + } + } + } + + stats.GetEntryHits = stats.GetEntryHits + uint(len(keys)) + stats.Hits++ + stats.GetEntriesHits++ + + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + + if dur > stats.Peak { + stats.Peak = dur + } + stats.Time += dur + + if dur > stats.GetEntriesPeak { + stats.GetEntriesPeak = dur + } + stats.GetEntriesTime += dur + } + + if d.dbStatsConfig.TableStats { + d.stats.Tables[ts.Name] = stats + } else { + d.stats.AllTables = stats + } + + if glog.V(3) { + 
glog.Info("GetEntries: End: ", "ts: ", ts, "values: ", values, " errors: ", errors) + } + + return values, errors +} + +func setError(e error, idx int, errors *[]error, numKeys int) { + if *errors == nil { + *errors = make([]error, numKeys) + } + (*errors)[idx] = e +} + +// getMultiEntry retrieves the entries of the given keys using "redis pipeline". +func (d *DB) getMultiEntry(ts *TableSpec, keys []string) ([]*redis.StringStringMapCmd, error) { + + if glog.V(3) { + glog.Info("getMultiEntry: Begin: ts: ", ts) + } + + var results = make([]*redis.StringStringMapCmd, len(keys)) + + pipe := d.client.Pipeline() + defer pipe.Close() + + if glog.V(3) { + glog.Info("getMultiEntry: RedisCmd: ", d.Name(), ": ", "pipe.HGetAll for the ", keys) + } + + for i, key := range keys { + results[i] = pipe.HGetAll(key) + } + + if glog.V(3) { + glog.Info("getMultiEntry: RedisCmd: ", d.Name(), ": ", "pipe.Exec") + } + _, err := pipe.Exec() + + if glog.V(3) { + glog.Info("getMultiEntry: End: ts: ", ts, "results: ", results, "err: ", err) + } + + return results, err +} diff --git a/translib/db/db_cache.go b/translib/db/db_cache.go new file mode 100644 index 000000000000..93687c6dbce8 --- /dev/null +++ b/translib/db/db_cache.go @@ -0,0 +1,260 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2020 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + // "fmt" + // "strconv" + + // "errors" + "reflect" + "strings" + "sync" + // "github.com/Azure/sonic-mgmt-common/cvl" + // "github.com/go-redis/redis/v7" + // "github.com/golang/glog" + // "github.com/Azure/sonic-mgmt-common/translib/tlerr" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +type dbCache struct { + Tables map[string]Table + Maps map[string]MAP +} + +type DBGlobalCache struct { + Databases [MaxDB]dbCache +} + +type DBCacheConfig struct { + PerConnection bool // Enable per DB conn cache + Global bool // Enable global cache (TBD) + CacheTables map[string]bool // Only cache these tables. + // Empty == Cache all tables + NoCacheTables map[string]bool // Do not cache these tables. + // "all" == Do not cache any tables + CacheMaps map[string]bool // Only cache these maps. 
+ // Empty == Cache all maps + NoCacheMaps map[string]bool // Do not cache these maps + // "all" == Do not cache any maps +} + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +func ReconfigureCache() error { + return dbCacheConfig.reconfigure() +} + +func ClearCache() error { + return nil // TBD for Global Cache +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// + +var dbCacheConfig *DBCacheConfig +var defaultDBCacheConfig DBCacheConfig = DBCacheConfig{ + PerConnection: false, + Global: false, +} +var reconfigureCacheConfig bool +var mutexCacheConfig sync.Mutex + +// var zeroDBCache = &DBCache{} + +func init() { + dbCacheConfig = &DBCacheConfig{} + dbCacheConfig.handleReconfigureSignal() + dbCacheConfig.reconfigure() +} + +//////////////////////////////////////////////////////////////////////////////// +// Configure DB Cache // +//////////////////////////////////////////////////////////////////////////////// + +func getDBCacheConfig() DBCacheConfig { + + dbCacheConfig.reconfigure() + + mutexCacheConfig.Lock() + + cacheConfig := DBCacheConfig{ + CacheTables: make(map[string]bool, len(dbCacheConfig.CacheTables)), + NoCacheTables: make(map[string]bool, len(dbCacheConfig.NoCacheTables)), + CacheMaps: make(map[string]bool, len(dbCacheConfig.CacheMaps)), + NoCacheMaps: make(map[string]bool, len(dbCacheConfig.NoCacheMaps)), + } + + cacheConfig.PerConnection = dbCacheConfig.PerConnection + cacheConfig.Global = dbCacheConfig.Global + + for k, v := range dbCacheConfig.CacheTables { + cacheConfig.CacheTables[k] = v + } + + for k, v := range dbCacheConfig.NoCacheTables { + cacheConfig.NoCacheTables[k] = v + } + + for k, v := range dbCacheConfig.CacheMaps { + cacheConfig.CacheMaps[k] = v + } + + 
for k, v := range dbCacheConfig.NoCacheMaps { + cacheConfig.NoCacheMaps[k] = v + } + + mutexCacheConfig.Unlock() + + return cacheConfig +} + +func (config *DBCacheConfig) reconfigure() error { + mutexCacheConfig.Lock() + var doReconfigure bool = reconfigureCacheConfig + if reconfigureCacheConfig { + reconfigureCacheConfig = false + } + mutexCacheConfig.Unlock() + + if doReconfigure { + var readDBCacheConfig DBCacheConfig + readDBCacheConfig.readFromDB() + + mutexCacheConfig.Lock() + configChanged := !reflect.DeepEqual(*dbCacheConfig, readDBCacheConfig) + mutexCacheConfig.Unlock() + + if configChanged { + ClearCache() + } + + mutexCacheConfig.Lock() + dbCacheConfig = &readDBCacheConfig + mutexCacheConfig.Unlock() + } + return nil +} + +func (config *DBCacheConfig) handleReconfigureSignal() error { + mutexCacheConfig.Lock() + reconfigureCacheConfig = true + mutexCacheConfig.Unlock() + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Read DB Cache Configuration // +//////////////////////////////////////////////////////////////////////////////// + +func (config *DBCacheConfig) readFromDB() error { + fields, e := readRedis("TRANSLIB_DB|default") + if e != nil { + + config.PerConnection = defaultDBCacheConfig.PerConnection + config.Global = defaultDBCacheConfig.Global + config.CacheTables = make(map[string]bool, + len(defaultDBCacheConfig.CacheTables)) + for k, v := range defaultDBCacheConfig.CacheTables { + config.CacheTables[k] = v + } + + config.NoCacheTables = make(map[string]bool, + len(defaultDBCacheConfig.NoCacheTables)) + for k, v := range defaultDBCacheConfig.NoCacheTables { + config.NoCacheTables[k] = v + } + + config.CacheMaps = make(map[string]bool, + len(defaultDBCacheConfig.CacheMaps)) + for k, v := range defaultDBCacheConfig.CacheMaps { + config.CacheMaps[k] = v + } + + config.NoCacheMaps = make(map[string]bool, + len(defaultDBCacheConfig.NoCacheMaps)) + for k, v := range defaultDBCacheConfig.NoCacheMaps 
{ + config.NoCacheMaps[k] = v + } + + } else { + for k, v := range fields { + switch { + case k == "per_connection_cache" && v == "True": + config.PerConnection = true + case k == "per_connection_cache" && v == "False": + config.PerConnection = false + case k == "global_cache" && v == "True": + config.Global = true + case k == "global_cache" && v == "False": + config.Global = false + case k == "@tables_cache": + l := strings.Split(v, ",") + config.CacheTables = make(map[string]bool, len(l)) + for _, t := range l { + config.CacheTables[t] = true + } + case k == "@no_tables_cache": + l := strings.Split(v, ",") + config.NoCacheTables = make(map[string]bool, len(l)) + for _, t := range l { + config.NoCacheTables[t] = true + } + case k == "@maps_cache": + l := strings.Split(v, ",") + config.CacheMaps = make(map[string]bool, len(l)) + for _, t := range l { + config.CacheMaps[t] = true + } + case k == "@no_maps_cache": + l := strings.Split(v, ",") + config.NoCacheMaps = make(map[string]bool, len(l)) + for _, t := range l { + config.NoCacheMaps[t] = true + } + } + } + } + return e +} + +func (config *DBCacheConfig) isCacheTable(name string) bool { + if (config.CacheTables[name] || (len(config.CacheTables) == 0)) && + !config.NoCacheTables["all"] && + !config.NoCacheTables[name] { + return true + } + return false +} + +func (config *DBCacheConfig) isCacheMap(name string) bool { + if (config.CacheMaps[name] || (len(config.CacheMaps) == 0)) && + !config.NoCacheMaps["all"] && + !config.NoCacheMaps[name] { + return true + } + return false +} diff --git a/translib/db/db_config.go b/translib/db/db_config.go index 0ec30615abbf..32e6ca9dd89b 100644 --- a/translib/db/db_config.go +++ b/translib/db/db_config.go @@ -25,6 +25,8 @@ import ( io "io/ioutil" "os" "strconv" + + "github.com/golang/glog" ) var dbConfigMap = make(map[string]interface{}) @@ -131,3 +133,29 @@ func getDbTcpAddr(dbName string) string { port := getDbPort(dbName) return hostname + ":" + strconv.Itoa(port) } + +func 
getDbSock(dbName string) string { + inst := getDbInst(dbName) + if unix_socket_path, ok := inst["unix_socket_path"]; ok { + return unix_socket_path.(string) + } else { + glog.V(4).Info("getDbSock: 'unix_socket_path' is not a valid field") + return "" + } +} + +func getDbPassword(dbName string) string { + inst := getDbInst(dbName) + password := "" + password_path, ok := inst["password_path"] + if !ok { + return password + } + data, er := io.ReadFile(password_path.(string)) + if er != nil { + // + } else { + password = (string(data)) + } + return password +} diff --git a/translib/db/db_get.go b/translib/db/db_get.go new file mode 100644 index 000000000000..c9a29750f9df --- /dev/null +++ b/translib/db/db_get.go @@ -0,0 +1,66 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2021 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "strings" + + "github.com/Azure/sonic-mgmt-common/translib/tlerr" + "github.com/golang/glog" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +// Get gets the value of the key +func (d *DB) Get(key string) (string, error) { + if glog.V(3) { + glog.Info("Get: Begin: key: ", key) + } + + if (d == nil) || (d.client == nil) { + return "", tlerr.TranslibDBConnectionReset{} + } + + // Only meant to retrieve metadata. + if !strings.HasPrefix(key, "CONFIG_DB") || strings.Contains(key, "|") { + return "", UseGetEntry + } + + // If pseudoDB then return not supported. TBD. + + glog.Info("Get: RedisCmd: ", d.Name(), ": ", "GET ", key) + val, e := d.client.Get(key).Result() + + if glog.V(3) { + glog.Info("Get: End: key: ", key, " val: ", val, " e: ", e) + } + + return val, e +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// diff --git a/translib/db/db_get_config.go b/translib/db/db_get_config.go new file mode 100644 index 000000000000..29e83db42abe --- /dev/null +++ b/translib/db/db_get_config.go @@ -0,0 +1,310 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. 
// +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "github.com/go-redis/redis/v7" + "github.com/golang/glog" + "github.com/kylelemons/godebug/pretty" +) + +type GetConfigOptions struct { + ScanCountHint int64 // Hint of redis work required for Scan + AllowWritable bool // Allow on writable enabled DB object too +} + +// GetConfig API to get some tables. Very expensive operation (time/resources) +// If len(tables) == 0, return all tables in the CONFIG_DB +// Notes: +// - Only supported on the CONFIG_DB +// - The keys of the table should not contain TableSeparator, or KeySeparator +// (CONFIG_DB table keys should not contain "|" (defaults for TableSeparator, +// or KeySeparator)) +// - Only supported when write/set is disabled [IsWriteDisabled == true ] +// - OnChange not supported [IsEnableOnChange == false] +// - PCC (per_connection_cache) is not supported, and it will log an error/ +// warning. 
+func (d *DB) GetConfig(tables []*TableSpec, opt *GetConfigOptions) (map[TableSpec]Table, error) { + + if glog.V(3) { + glog.Infof("GetConfig: Begin: tables: %v, opt: %+v", tables, opt) + } + + if d.Opts.DBNo != ConfigDB { + err := SupportsCfgDBOnly + glog.Error("GetConfig: error: ", err) + return nil, err + } + + allowWritable := opt != nil && opt.AllowWritable + if !d.Opts.IsWriteDisabled && !allowWritable { + err := SupportsReadOnly + glog.Error("GetConfig: error: ", err) + return nil, err + } + + if d.Opts.IsOnChangeEnabled { + err := OnChangeNoSupport + glog.Error("GetConfig: error: ", err) + return nil, err + } + + if d.dbCacheConfig.PerConnection { + glog.Warning("GetConfig: Per Connection Cache not supported") + } + + // Filter on tables: This is optimized for 1 table. Filtering on multiple + // tables can be optimized, however, it needs some glob pattern + // manufacturing feasibility. All tables is the only requirement currently, + // therefore not considering further optimization. 
+ pattern := Key{Comp: []string{"*"}} + var ts *TableSpec + var tsM map[TableSpec]bool + if len(tables) == 1 { + + ts = tables[0] + tsM = map[TableSpec]bool{*ts: true} + + } else { + + ts = &(TableSpec{Name: "*"}) + + // Create a map of requested tables, for faster comparision + if len(tables) > 1 { + tsM = make(map[TableSpec]bool, len(tables)) + for _, ts := range tables { + tsM[*ts] = true + } + } + } + + scanCountHint := int64(defaultGetTablesSCCountHint) + if (opt != nil) && (opt.ScanCountHint != 0) { + scanCountHint = opt.ScanCountHint + } + scOpts := ScanCursorOpts{ + AllowWritable: allowWritable, + CountHint: scanCountHint, + AllowDuplicates: true, + ScanType: KeyScanType, // Default + } + + sc, err := d.NewScanCursor(ts, pattern, &scOpts) + if err != nil { + return nil, err + } + defer sc.DeleteScanCursor() + + tblM := make(map[TableSpec]Table, InitialTablesCount) + + for scanComplete := false; !scanComplete; { + var redisKeys []string + redisKeys, scanComplete, err = sc.GetNextRedisKeys(&scOpts) + if err != nil { + // GetNextRedisKeys already logged the error + return nil, err + } + + if glog.V(4) { + glog.Infof("GetConfig: %v #redisKeys, scanComplete %v", + len(redisKeys), scanComplete) + } + + // Initialize the pipeline + pipe := d.client.Pipeline() + + tss := make([]*TableSpec, 0, len(redisKeys)) + presults := make([]*redis.StringStringMapCmd, 0, len(redisKeys)) + keys := make([]Key, 0, len(redisKeys)) + + for index, redisKey := range redisKeys { + if glog.V(6) { + glog.Infof("GetConfig: redisKeys[%d]: %v", index, redisKey) + } + + // Keys with no (Table|Key)Separator in them would not be selected + // due to the "*|*" pattern search. So, no need to select them. + + rKts, key := d.redis2ts_key(redisKey) + + // Do the table filtering here, since redis glob style patterns + // cannot handle multiple tables matching. 
+ if len(tables) > 1 { + if present, ok := tsM[rKts]; !ok || !present { + tss = append(tss, nil) + keys = append(keys, Key{}) + presults = append(presults, nil) + continue + } + } + + tss = append(tss, &rKts) + keys = append(keys, key) + presults = append(presults, pipe.HGetAll(redisKey)) + } + + if glog.V(3) { + glog.Info("GetConfig: #tss: ", len(tss), ", #presults: ", + len(presults), ", #keys: ", len(keys)) + } + + // Execute the Pipeline + if glog.V(3) { + glog.Info("GetConfig: RedisCmd: ", d.Name(), ": ", "pipe.Exec") + } + _, err = pipe.Exec() // Ignore returned Cmds. If any err, log it. + + // Close the Pipeline + pipe.Close() + + if err != nil { + glog.Error("GetConfig: pipe.Exec() err: ", err) + return nil, err + } + + // Iterate the returned array of Values to create tblM[] + for index, redisKey := range redisKeys { + if glog.V(6) { + glog.Infof("GetConfig: tblM[] redisKeys[%d]: %v", index, + redisKey) + } + + result := presults[index] + + if tss == nil { + continue + } + + if result == nil { + glog.Warningf("GetConfig: redisKeys[%d]: %v nil", index, + redisKey) + continue + } + + field, err := result.Result() + if err != nil { + glog.Warningf("GetConfig: redisKeys[%d]: %v err", index, err) + continue + } + + dbValue := Value{Field: field} + + // Create Table in map if not created + ts := *(tss[index]) + if _, ok := tblM[ts]; !ok { + tblM[ts] = Table{ + ts: &ts, + entry: make(map[string]Value, InitialTableEntryCount), + complete: true, + db: d, + } + } + + tblM[ts].entry[redisKey] = dbValue + } + } + + if allowWritable { + err = d.applyTxCache(tblM, tsM) + if err != nil { + glog.V(2).Info("GetConfig: applyTxCache failed: ", err) + return nil, err + } + } + + if glog.V(3) { + glog.Infof("GetConfig: End: #tblM: %v", len(tblM)) + } + if glog.V(6) { + for ts, table := range tblM { + glog.Infof("GetConfig: #entry in tblM[%v] = %v", ts.Name, + len(table.entry)) + if glog.V(8) { + glog.Infof("GetConfig: pretty entry in tblM[%v] = \n%v", + ts.Name, 
pretty.Sprint(table.entry)) + } + } + } + + return tblM, nil +} + +func (d *DB) applyTxCache(data map[TableSpec]Table, tableFilter map[TableSpec]bool) error { + for _, cmd := range d.txCmds { + cmdTs := TableSpec{Name: cmd.ts.Name} // to match the TableSpec created from redis key + if len(tableFilter) != 0 && !tableFilter[cmdTs] { + continue + } + + if cmd.op != txOpHMSet { + if _, tableFound := data[cmdTs]; !tableFound { + continue + } + } + + keyStr := d.key2redis(&cmdTs, *cmd.key) + keyDelete := false + + switch cmd.op { + case txOpHMSet: + if table, tableFound := data[cmdTs]; !tableFound { + data[cmdTs] = Table{ + ts: &cmdTs, + entry: map[string]Value{keyStr: cmd.value.Copy()}, + complete: true, + db: d, + } + } else if entry, keyFound := table.entry[keyStr]; keyFound { + for fName, fVal := range cmd.value.Field { + entry.Set(fName, fVal) + } + } else { + table.entry[keyStr] = cmd.value.Copy() + } + case txOpHDel: + if entry, keyFound := data[cmdTs].entry[keyStr]; keyFound { + for fName := range cmd.value.Field { + entry.Remove(fName) + } + keyDelete = !entry.IsPopulated() + } + case txOpDel: + keyDelete = true + } + + if keyDelete { + table := data[cmdTs] + delete(table.entry, keyStr) + if len(table.entry) == 0 { + delete(data, cmdTs) + } + } + } + + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Constants // +//////////////////////////////////////////////////////////////////////////////// + +const ( + defaultGetTablesSCCountHint = 100 +) diff --git a/translib/db/db_get_config_test.go b/translib/db/db_get_config_test.go new file mode 100644 index 000000000000..04a5b621c074 --- /dev/null +++ b/translib/db/db_get_config_test.go @@ -0,0 +1,254 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. 
// +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +func BenchmarkGetConfig(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, e := db.GetConfig(nil, nil); e != nil { + b.Errorf("GetConfig() returns err: %v", e) + } + } +} + +func TestGetConfig(t *testing.T) { + if _, e := db.GetConfig(nil, nil); e != nil { + t.Errorf("GetConfig() returns err: %v", e) + } +} + +func TestGetConfigSingular(t *testing.T) { + if _, e := db.GetConfig([]*TableSpec{&ts}, nil); e != nil { + t.Errorf("GetConfig() returns err: %v", e) + } +} + +func TestGetConfigAllTables(t *testing.T) { + verifyGetConfigAllTables(t, db, nil) +} + +func verifyGetConfigAllTables(t *testing.T, db *DB, opts *GetConfigOptions) { + tablesM, e := db.GetConfig([]*TableSpec{}, opts) + if e != nil { + t.Errorf("GetTablePattern() returns err: %v", e) + } + + table, e := db.GetTable(&ts) + if e != nil { + t.Errorf("GetTable() returns err: %v", e) + } + + table.patterns = nil // because GetConfig() does not populate patterns + if !reflect.DeepEqual(tablesM[ts], table) { + fmt.Println("\ntable: \n", table) + fmt.Println("\ntablesM[ts]: \n", tablesM[ts]) + t.Errorf("GetTable(ts) != GetConfig()[ts]") + } + + // Count the keys in all the tables + tsM := make(map[TableSpec]int, 10) + redisKeys, e := 
db.client.Keys("*").Result() + if e != nil { + t.Errorf("client.Keys() returns err: %v", e) + } + + for _, redisKey := range redisKeys { + + // Does it have a Separator? + if strings.IndexAny(redisKey, db.Opts.TableNameSeparator+ + db.Opts.KeySeparator) == -1 { + + continue + } + + ts, _ := db.redis2ts_key(redisKey) + tsM[ts]++ + } + + if len(tsM) != len(tablesM) { + fmt.Println("\n#tsM: \n", len(tsM)) + fmt.Println("\n#tablesM: \n", len(tablesM)) + t.Errorf("#GetConfig() != #Tables") + } + + for ts, table := range tablesM { + tableComp, e := db.GetTable(&ts) + if e != nil { + t.Errorf("GetTable(%v) returns err: %v", ts, e) + } + + tableComp.patterns = nil // because GetConfig() does not populate patterns + if !reflect.DeepEqual(table, tableComp) { + fmt.Println("\ntable: \n", table) + fmt.Println("\ntableComp: \n", tableComp) + t.Errorf("Detail: GetTable(%q) != GetConfig()[%q]", ts.Name, ts.Name) + } + } +} + +func TestGetConfig_writable(t *testing.T) { + d, err := newDB(ConfigDB) + if err != nil { + t.Fatal("newDB() failed;", err) + } + defer d.DeleteDB() + + // GetConfig() should fail with nil options + t.Run("nilOpts", func(tt *testing.T) { + _, err = d.GetConfig([]*TableSpec{}, nil) + if err == nil { + tt.Errorf("GetConfig() with nil options should have failed on writable DB") + } + }) + + // GetConfig() should fail with AllowWritable=false + t.Run("AllowWritable=false", func(tt *testing.T) { + _, err = d.GetConfig([]*TableSpec{}, &GetConfigOptions{AllowWritable: false}) + if err == nil { + tt.Errorf("GetConfig() with AllowWritable=false should have failed on writable DB") + } + }) + + // GetConfig() should work with AllowWritable=true + t.Run("AllowWritable=true", func(tt *testing.T) { + verifyGetConfigAllTables(tt, d, &GetConfigOptions{AllowWritable: true}) + }) +} + +func TestGetConfig_txCache(t *testing.T) { + d, err := newDB(ConfigDB) + if err != nil { + t.Fatal("newDB() failed;", err) + } + defer d.DeleteDB() + + if err = d.StartTx(nil, nil); err != nil 
{ + t.Fatal("StartTx() failed; ", err) + } + defer d.AbortTx() + + // We'll perform few operations on 'ts' table to populate transaction cache. + // testTableSetup() would have already populated test entries during init + newKey := Key{Comp: []string{"__A_NEW_KEY__"}} + modKey := Key{Comp: []string{"KEY1"}} + delKey := Key{Comp: []string{"KEY2"}} + + // Value for HMSET; used by both key create and modify steps + testValue := Value{Field: map[string]string{"GetConfigTestField": "foo"}} + + // Load the existing value of modKey; pick a random field for HDEL + oldVal, _ := d.GetEntry(&ts, modKey) + delFields := Value{Field: map[string]string{}} + for field := range oldVal.Field { + delFields.Set(field, "") + break + } + + // Perform db operations + d.SetEntry(&ts, newKey, testValue) + d.ModEntry(&ts, modKey, testValue) + d.DeleteEntryFields(&ts, modKey, delFields) + d.DeleteEntry(&ts, delKey) + + // Run GetConfig() for 'ts' table only + opts := &GetConfigOptions{AllowWritable: true} + tsConfig, err := d.GetConfig([]*TableSpec{&ts}, opts) + if err != nil { + t.Fatalf("GetConfig([%q]) failed; %v", ts.Name, err) + } + tsTable := tsConfig[ts] + if tsTable.ts == nil || len(tsConfig) != 1 { + t.Fatalf("GetConfig([%q]) returned incorrect data; %v", ts.Name, tsConfig) + } + + // Check value for newly created key + t.Run("newKey", func(tt *testing.T) { + val, _ := tsTable.GetEntry(newKey) + if !reflect.DeepEqual(val, testValue) { + tt.Errorf("tsTable contains wrong value for new key %q", newKey.Comp[0]) + tt.Errorf("Expected: %v", testValue) + tt.Errorf("Received: %v", val) + } + }) + + // Check value for modified key + t.Run("modKey", func(tt *testing.T) { + expValue := oldVal.Copy() + for f := range delFields.Field { + expValue.Remove(f) + } + for f, v := range testValue.Field { + expValue.Set(f, v) + } + + val, _ := tsTable.GetEntry(modKey) + if !reflect.DeepEqual(val, expValue) { + tt.Errorf("tsTable contains wrong value for the modified key %q", modKey.Comp[0]) + 
tt.Errorf("Expected: %v", expValue) + tt.Errorf("Received: %v", val) + } + }) + + // Check deleted key is not present in the response + t.Run("delKey", func(tt *testing.T) { + val, _ := tsTable.GetEntry(delKey) + if val.IsPopulated() { + tt.Errorf("tsTable contains value for deleted key %s!", delKey.Comp[0]) + } + }) + + // Try GetConfig() for all tables.. + t.Run("allTables", func(tt *testing.T) { + verifyGetConfigAllTables(tt, d, opts) + }) + + // Check adding and deleting same key multiple times. + t.Run("addDelTable", func(tt *testing.T) { + newTable := TableSpec{Name: "__A_NEW_TABLE__"} + for i := 0; i < 10; i++ { + d.SetEntry(&newTable, newKey, testValue) + d.DeleteEntry(&newTable, newKey) + } + verifyGetConfigTableNotFound(tt, d, newTable) + }) + + // Check deleting all keys of an existing table + t.Run("delTable", func(tt *testing.T) { + d.DeleteTable(&ts) + verifyGetConfigTableNotFound(tt, d, ts) + }) +} + +func verifyGetConfigTableNotFound(t *testing.T, d *DB, ts TableSpec) { + opts := &GetConfigOptions{AllowWritable: true} + allTables, err := d.GetConfig([]*TableSpec{}, opts) + if err != nil { + t.Fatalf("GetConfig() failed; err=%v", err) + } + if table, ok := allTables[ts]; ok { + t.Fatalf("GetConfig() returned stale entries for the deleted table %q;\n%v", ts.Name, table) + } +} diff --git a/translib/db/db_get_test.go b/translib/db/db_get_test.go new file mode 100644 index 000000000000..1bc242bd59b4 --- /dev/null +++ b/translib/db/db_get_test.go @@ -0,0 +1,90 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. 
// +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "os" + "strconv" + "testing" +) + +var GM_PF string = "DBGM_TST_" + strconv.FormatInt(int64(os.Getpid()), 10) +var gmTs *TableSpec = &TableSpec{Name: GM_PF + "RADIUS"} +var gmRK = GM_PF + "RADIUS|global_key" +var gmEntry Value = Value{Field: map[string]string{"auth_type": "pap"}} +var cdbUpdated string = "CONFIG_DB_UPDATED" + +func cleanupGM(t *testing.T, d *DB, deleteDB bool) { + if d == nil { + return + } + + if deleteDB { + defer d.DeleteDB() + } + + d.DeleteEntry(gmTs, d.redis2key(gmTs, gmRK)) +} + +func TestGetMeta(t *testing.T) { + + d, e := newDB(ConfigDB) + if e != nil { + t.Errorf("newDB() fails e: %v", e) + } + + gmKey := d.redis2key(gmTs, gmRK) + + // Cleanup before starting + cleanupGM(t, d, false) + + // Register CleanUp Function + t.Cleanup(func() { cleanupGM(t, d, true) }) + + e = d.StartTx(nil, nil) + + if e != nil { + t.Errorf("StartTx() fails e = %v", e) + } + + if e = d.SetEntry(gmTs, gmKey, gmEntry); e != nil { + t.Fatalf("d.SetEntry(%v,%v,%v) fails e: %v", gmTs, gmKey, gmEntry, e) + } + + e = d.CommitTx() + + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + } + + if s, e := d.Get(cdbUpdated); e != nil { + t.Errorf("d.Get(%s) fails e: %v", cdbUpdated, e) + } else if len(s) <= 1 { + t.Errorf("d.Get(%s) returns: %s", cdbUpdated, s) + } + + if _, e := d.Get("RANDOM_KEY"); e == nil { + t.Errorf("d.Get(%s) succeeds!", "RANDOM_KEY") + } + + if _, e := 
d.Get("CONFIG_DB_TABLE|global"); e == nil { + t.Errorf("d.Get(%s) succeeds!", "CONFIG_DB_TABLE|global") + } +} diff --git a/translib/db/db_map.go b/translib/db/db_map.go new file mode 100644 index 000000000000..dfd2c677c377 --- /dev/null +++ b/translib/db/db_map.go @@ -0,0 +1,116 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2020 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "github.com/Azure/sonic-mgmt-common/translib/tlerr" + "github.com/golang/glog" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +type MAP struct { + ts *TableSpec + mapMap map[string]string + complete bool + db *DB +} + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +// GetMAP gets the entire Map. 
+func (d *DB) GetMAP(ts *TableSpec) (MAP, error) { + if glog.V(3) { + glog.Info("GetMAP: Begin: ts: ", ts) + } + + if (d == nil) || (d.client == nil) { + return MAP{}, tlerr.TranslibDBConnectionReset{} + } + + var mapObj MAP + + v, e := d.GetMapAll(ts) + if e == nil { + mapObj = MAP{ + ts: ts, + complete: true, + mapMap: v.Field, + db: d, + } + } + + /* + v, e := d.client.HGetAll(ts.Name).Result() + + if len(v) != 0 { + mapObj.mapMap = v + } else { + if glog.V(1) { + glog.Info("GetMAP: HGetAll(): empty map") + } + mapObj = MAP{} + e = tlerr.TranslibRedisClientEntryNotExist { Entry: ts.Name } + } + */ + + if glog.V(3) { + glog.Info("GetMAP: End: MAP: ", mapObj) + } + + return mapObj, e +} + +func (m *MAP) GetMap(mapKey string) (string, error) { + if glog.V(3) { + glog.Info("MAP.GetMap: Begin: ", " mapKey: ", mapKey) + } + + var e error + res, ok := m.mapMap[mapKey] + if !ok { + e = tlerr.TranslibRedisClientEntryNotExist{Entry: m.ts.Name} + } + + if glog.V(3) { + glog.Info("MAP.GetMap: End: ", "res: ", res, " e: ", e) + } + + return res, e +} + +func (m *MAP) GetMapAll() (Value, error) { + + if glog.V(3) { + glog.Info("MAP.GetMapAll: Begin: ") + } + + v := Value{Field: m.mapMap} // TBD: This is a reference + + if glog.V(3) { + glog.Info("MAP.GetMapAll: End: ", "v: ", v) + } + + return v, nil +} diff --git a/translib/db/db_opts_to_test.go b/translib/db/db_opts_to_test.go new file mode 100644 index 000000000000..e78a85952d3b --- /dev/null +++ b/translib/db/db_opts_to_test.go @@ -0,0 +1,150 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. 
// +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + // "context" + + // "fmt" + // "errors" + // "flag" + // "github.com/golang/glog" + + // "github.com/Azure/sonic-mgmt-common/translib/tlerr" + // "os/exec" + "os" + // "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/go-redis/redis/v7" +) + +func TestDefaultTimeout(t *testing.T) { + + var pid int = os.Getpid() + + t.Logf("TestDefaultTimeout: %s: begin", time.Now().String()) + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + IsWriteDisabled: true, + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + } + + // Run a blocking LUA script on it for ~ 5 seconds + var wg sync.WaitGroup + wg.Add(1) + go blockLUAScript(&wg, 5, t) + + t.Logf("TestDefaultTimeout: %s: Sleep(1) for LUA...", time.Now().String()) + time.Sleep(time.Second) + t.Logf("TestDefaultTimeout: %s: call GetEntry()", time.Now().String()) + + // Do GetEntry + ts := TableSpec{Name: DBPAT_TST_PREFIX + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + + _, e = d.GetEntry(&ts, akey) + + // Confirm the Network Timeout Error + if e != nil { + t.Logf("GetEntry() got error e = %v", e) + s := e.Error() + if !(strings.HasPrefix(s, "i/o timeout") || + strings.HasPrefix(s, "BUSY")) { + t.Errorf("GetEntry() Expecting timeout, BUSY... 
e = %v", e) + } + } else { + t.Errorf("GetEntry() should have failed") + } + + t.Logf("TestDefaultTimeout: %s: Wait for LUA...", time.Now().String()) + + // Wait for the LUA script to return + wg.Wait() + + t.Logf("TestDefaultTimeout: %s: Sleep(20s)...", time.Now().String()) + + // Sleep In case we got a i/o timeout + time.Sleep(20 * time.Second) + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } + + t.Logf("TestDefaultTimeout: %s: end", time.Now().String()) +} + +func blockLUAScript(wg *sync.WaitGroup, secs int, t *testing.T) { + + defer wg.Done() + + t.Logf("blockLUAScript: %s: begin: secs: %v", time.Now().String(), secs) + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + IsWriteDisabled: true, + DisableCVLCheck: true, + }) + + if e != nil { + t.Errorf("blockLUAScript: NewDB() fails e = %v", e) + } + + // LUA does not have a sleep(), so empirical calculations. + luaScript := redis.NewScript(` +local i = tonumber(ARGV[1]) * 5150000 +while (i > 0) do + local res = redis.call('GET', 'RANDOM_KEY') + i=i-1 +end +return i +`) + + if _, e := luaScript.Run(d.client, []string{}, secs).Int(); e != nil { + t.Logf("blockLUAScript: luaScript.Run() fails e = %v", e) + } + + t.Logf("blockLUAScript: %s: end: secs: %v", time.Now().String(), secs) + + t.Logf("blockLUAScript: %s: Sleep(2s)...", time.Now().String()) + time.Sleep(2 * time.Second) + + if e = d.DeleteDB(); e != nil { + t.Errorf("blockLUAScript: DeleteDB() fails e = %v", e) + } +} diff --git a/translib/db/db_redis_opts.go b/translib/db/db_redis_opts.go new file mode 100644 index 000000000000..023fd6072eed --- /dev/null +++ b/translib/db/db_redis_opts.go @@ -0,0 +1,301 @@ +/////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. 
// +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "errors" + "flag" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-redis/redis/v7" + "github.com/golang/glog" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// Internal Types // +//////////////////////////////////////////////////////////////////////////////// + +type _DBRedisOptsConfig struct { + opts redis.Options +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// + +var dbRedisOptsConfig *_DBRedisOptsConfig = &_DBRedisOptsConfig{} + +var reconfigureRedisOptsConfig = true // Signal Received, or initialization + +var mutexRedisOptsConfig sync.Mutex + +var goRedisOpts string // Command line options + +var goRedisOptsOnce sync.Once // Command line options handled only once + +func 
setGoRedisOpts(optsString string) { + // Command Line Options have higher priority on startup. After that, on + // receipt of a signal (SIGUSR2?), the TRANSLIB_DB|default + // "go_redis_opts" will have higher priority. + if optsString != "" { + glog.Infof("setGoRedisOpts: optsString: %s", optsString) + // On startup, command-line has priority. Skip reconfigure from DB + reconfigureRedisOptsConfig = false + dbRedisOptsConfig.parseRedisOptsConfig(optsString) + } +} + +// adjustRedisOpts() gets the redis.Options to be set based on command line +// options, values passed via db Options, and TRANSLIB_DB|default settings. +// Additionally it also adjusts the passed dbOpts for separator. +func adjustRedisOpts(dbOpt *Options) *redis.Options { + dbRedisOptsConfig.reconfigure() + mutexRedisOptsConfig.Lock() + redisOpts := dbRedisOptsConfig.opts + mutexRedisOptsConfig.Unlock() + + var dbSock string + var dbNetwork string + addr := DefaultRedisLocalTCPEP + dbId := int(dbOpt.DBNo) + dbPassword := "" + if dbInstName := getDBInstName(dbOpt.DBNo); dbInstName != "" { + if isDbInstPresent(dbInstName) { + if dbSock = getDbSock(dbInstName); dbSock != "" { + dbNetwork = DefaultRedisUNIXNetwork + addr = dbSock + } else { + dbNetwork = DefaultRedisTCPNetwork + addr = getDbTcpAddr(dbInstName) + } + dbId = getDbId(dbInstName) + dbSepStr := getDbSeparator(dbInstName) + dbPassword = getDbPassword(dbInstName) + if len(dbSepStr) > 0 { + if len(dbOpt.TableNameSeparator) > 0 && + dbOpt.TableNameSeparator != dbSepStr { + glog.Warningf("TableNameSeparator '%v' in"+ + " the Options is different from the"+ + " one configured in the Db config. 
file for the"+ + " Db name %v", dbOpt.TableNameSeparator, dbInstName) + } + dbOpt.KeySeparator = dbSepStr + dbOpt.TableNameSeparator = dbSepStr + } else { + glog.Warning("Database Separator not present for the Db name: ", + dbInstName) + } + } else { + glog.Warning("Database instance not present for the Db name: ", + dbInstName) + } + } else { + glog.Errorf("Invalid database number %d", dbId) + } + + redisOpts.Network = dbNetwork + redisOpts.Addr = addr + redisOpts.Password = dbPassword + redisOpts.DB = dbId + + // redisOpts.DialTimeout = 0 // Default + + // Default 3secs read & write timeout was not sufficient in high CPU load + // on certain platforms. Hence increasing to 10secs. (via command-line + // options). Setting read-timeout is sufficient; internally go-redis + // updates the same for write-timeout as well. + // redisOpts.ReadTimeout = 10 * time.Second // Done via command-line + + // For Transactions, limit the pool, if the options haven't over-ridden it. + if redisOpts.PoolSize == 0 { + redisOpts.PoolSize = 1 + } + // Each DB gets it own (single) connection. + + return &redisOpts +} + +func init() { + flag.StringVar(&goRedisOpts, "go_redis_opts", "", "Options for go-redis") +} + +//////////////////////////////////////////////////////////////////////////////// +// Configure DB Redis Opts // +//////////////////////////////////////////////////////////////////////////////// + +func (config *_DBRedisOptsConfig) reconfigure() error { + + mutexRedisOptsConfig.Lock() + // Handle command line options after they are parsed. 
+ if flag.Parsed() { + goRedisOptsOnce.Do(func() { + setGoRedisOpts(goRedisOpts) + }) + } else { + glog.Warningf("_DBRedisOptsConfig:reconfigure: flags not parsed!") + } + + var doReconfigure bool = reconfigureRedisOptsConfig + if reconfigureRedisOptsConfig { + reconfigureRedisOptsConfig = false + } + mutexRedisOptsConfig.Unlock() + + if doReconfigure { + glog.Infof("_DBRedisOptsConfig:reconfigure: Handling signal.") + var readDBRedisOptsConfig _DBRedisOptsConfig + readDBRedisOptsConfig.readFromDB() + + mutexRedisOptsConfig.Lock() + if !reflect.DeepEqual(*config, readDBRedisOptsConfig) { + glog.Infof("_DBRedisOptsConfig:reconfigure: Change Detected.") + dbRedisOptsConfig = &readDBRedisOptsConfig + } + mutexRedisOptsConfig.Unlock() + } + return nil +} + +func (config *_DBRedisOptsConfig) handleReconfigureSignal() error { + mutexRedisOptsConfig.Lock() + reconfigureRedisOptsConfig = true + mutexRedisOptsConfig.Unlock() + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Read DB Redis Options Configuration // +//////////////////////////////////////////////////////////////////////////////// + +func (config *_DBRedisOptsConfig) readFromDB() error { + fields, e := readRedis("TRANSLIB_DB|default") + if e == nil { + if optsString, ok := fields["go_redis_opts"]; ok { + // Parse optsString into config.opts + config.parseRedisOptsConfig(optsString) + } + } + return e +} + +func (config *_DBRedisOptsConfig) parseRedisOptsConfig(optsString string) error { + var e, optSAErr error + var intVal int64 + var eS string + + glog.Infof("parseRedisOptsConfig: optsString: %s", optsString) + + // First zero the config redis.Options, in case there is any existing + // stale configuration. 
+ config.opts = redis.Options{} + + // This could be optimized using reflection, if the # of options grows + for optI, optS := range strings.Split(optsString, ",") { + glog.Infof("parseRedisOptsConfig: optI: %d optS: %s", optI, optS) + if optSA := strings.Split(optS, "="); len(optSA) > 1 { + switch optSA[0] { + case "MaxRetries": + if intVal, optSAErr = strconv.ParseInt(optSA[1], 0, 64); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } else { + config.opts.MaxRetries = int(intVal) + } + case "MinRetryBackoff": + if config.opts.MinRetryBackoff, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "MaxRetryBackoff": + if config.opts.MaxRetryBackoff, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "DialTimeout": + if config.opts.DialTimeout, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "ReadTimeout": + if config.opts.ReadTimeout, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "WriteTimeout": + if config.opts.WriteTimeout, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "PoolSize": + if intVal, optSAErr = strconv.ParseInt(optSA[1], 0, 64); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } else { + config.opts.PoolSize = int(intVal) + } + case "MinIdleConns": + if intVal, optSAErr = strconv.ParseInt(optSA[1], 0, 64); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } else { + config.opts.MinIdleConns = int(intVal) + } + case "MaxConnAge": + if config.opts.MaxConnAge, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += 
("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "PoolTimeout": + if config.opts.PoolTimeout, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "IdleTimeout": + if config.opts.IdleTimeout, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + case "IdleCheckFrequency": + if config.opts.IdleCheckFrequency, optSAErr = + time.ParseDuration(optSA[1]); optSAErr != nil { + eS += ("Parse Error: " + optSA[0] + " :" + optSAErr.Error()) + } + default: + eS += ("Unknown Redis Option: " + optSA[0] + " ") + } + } + } + + if len(eS) != 0 { + glog.Errorf("parseRedisOptsConfig: Unknown: %s", eS) + e = errors.New(eS) + } + + return e +} diff --git a/translib/db/db_redis_opts_test.go b/translib/db/db_redis_opts_test.go new file mode 100644 index 000000000000..865917b66b16 --- /dev/null +++ b/translib/db/db_redis_opts_test.go @@ -0,0 +1,97 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2022 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "reflect" + "testing" + "time" + + "github.com/go-redis/redis/v7" +) + +func TestSetGoRedisOpts(t *testing.T) { + + compareRedisOptsString2Struct(t, "ReadTimeout=10s", + &redis.Options{ReadTimeout: 10 * time.Second}) + compareRedisOptsString2Struct(t, "ReadTimeout=10s,WriteTimeout=11s", + &redis.Options{ReadTimeout: 10 * time.Second, WriteTimeout: 11 * time.Second}) + +} + +func TestReadFromDBRedisOpts(t *testing.T) { + + compareRedisOptsDBRead2Struct(t, "ReadTimeout=10s", + &redis.Options{ReadTimeout: 10 * time.Second}) + compareRedisOptsDBRead2Struct(t, "ReadTimeout=10s,WriteTimeout=11s", + &redis.Options{ReadTimeout: 10 * time.Second, WriteTimeout: 11 * time.Second}) + +} + +func compareRedisOptsString2Struct(t *testing.T, optsS string, opts *redis.Options) { + setGoRedisOpts(optsS) + if !reflect.DeepEqual(dbRedisOptsConfig.opts, *opts) { + t.Errorf("SetGoRedisOpts() mismatch (%s) != %+v", optsS, opts) + t.Errorf("New dbRedisOptsConfig.opts: %+v", dbRedisOptsConfig.opts) + } +} + +func compareRedisOptsDBRead2Struct(t *testing.T, optsS string, opts *redis.Options) { + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Fatalf("NewDB() fails e = %v", e) + } + + defer d.DeleteDB() + + // Do SetEntry + ts := TableSpec{Name: "TRANSLIB_DB"} + + key := make([]string, 1, 1) + key[0] = "default" + akey := Key{Comp: key} + + if oldValue, ge := d.GetEntry(&ts, akey); ge == nil { + defer d.SetEntry(&ts, akey, oldValue) + } + + value := make(map[string]string, 1) + value["go_redis_opts"] = optsS + avalue := Value{Field: value} + + if e = d.SetEntry(&ts, akey, avalue); e != nil { + t.Fatalf("SetEntry() fails e = %v", e) + } + + t.Logf("TestReadFromDBRedisOpts: handleReconfigureSignal()") + dbRedisOptsConfig.handleReconfigureSignal() + 
dbRedisOptsConfig.reconfigure() + if !reflect.DeepEqual(dbRedisOptsConfig.opts, *opts) { + t.Errorf("reconfigure() mismatch (%s) != %+v", optsS, opts) + t.Errorf("New dbRedisOptsConfig.opts: %+v", dbRedisOptsConfig.opts) + } +} diff --git a/translib/db/db_redis_pipe_test.go b/translib/db/db_redis_pipe_test.go new file mode 100644 index 000000000000..5d399b3a9821 --- /dev/null +++ b/translib/db/db_redis_pipe_test.go @@ -0,0 +1,558 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2021 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "os" + "reflect" + "strconv" + "testing" +) + +//func init() { +// flag.Set("alsologtostderr", fmt.Sprintf("%t", true)) +// var logLevel string +// flag.StringVar(&logLevel, "logLevel", "4", "test") +// flag.Lookup("v").Value.Set(logLevel) +//} + +func newDB(dBNum DBNum) (*DB, error) { + d, e := NewDB(Options{ + DBNo: dBNum, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + return d, e +} + +func deleteTableAndDb(d *DB, ts *TableSpec, t *testing.T) { + e := d.DeleteTable(ts) + + if e != nil { + t.Errorf("DeleteTable() fails e = %v", e) + return + } + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} + +func TestGetEntries1(t *testing.T) { + + var pid int = os.Getpid() + + d, e := newDB(ConfigDB) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + ca2 := make([]string, 1, 1) + ca2[0] = "MyACL2_ACL_IPVNOTEXIST" + akey2 := Key{Comp: ca2} + + // Add the Entries for Get|DeleteKeys + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v, keys = %v", e, keys) + return + } + + e = d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + // Add the Entries again for Table + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + 
return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e = d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + values, errors := d.GetEntries(&ts, keys) + t.Log("values: ", values) + t.Log("errors: ", errors) + + for _, value := range values { + if reflect.DeepEqual(value, avalue) { + continue + } else { + t.FailNow() + } + } + + if errors != nil { + t.FailNow() + } + + deleteTableAndDb(d, &ts, t) +} + +// to test by giving the duplicate key +func TestGetEntries2(t *testing.T) { + + var pid int = os.Getpid() + + d, e := newDB(ConfigDB) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + ca2 := make([]string, 1, 1) + ca2[0] = "MyACL2_ACL_IPVNOTEXIST" + akey2 := Key{Comp: ca2} + + // Add the Entries for Get|DeleteKeys + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + e = d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + // Add the Entries again for Table + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys = make([]Key, 0) + keys = append(keys, akey) + keys = append(keys, akey) + + values, errors := d.GetEntries(&ts, keys) 
+ t.Log("values: ", values) + t.Log("errors: ", errors) + + if len(values) != 2 { + t.FailNow() + } + + t.Log("avalue ==> : ", avalue) + + for idx, value := range values { + if reflect.DeepEqual(value, avalue) { + continue + } else { + t.Log("value not matching for the key: ", keys[idx]) + t.FailNow() + } + } + + deleteTableAndDb(d, &ts, t) +} + +// to test the errors slice by giving one of the invalid key +func TestGetEntries3(t *testing.T) { + + var pid int = os.Getpid() + + d, e := newDB(ConfigDB) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + ca2 := make([]string, 1, 1) + ca2[0] = "MyACL2_ACL_IPVNOTEXIST" + akey2 := Key{Comp: ca2} + + // Add the Entries for Get|DeleteKeys + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + e = d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + // Add the Entries again for Table + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + ca3 := make([]string, 1, 1) + ca3[0] = "KEY_NOT_EXIST" + akey3 := Key{Comp: ca3} + + keys = make([]Key, 0) + keys = append(keys, akey) + keys = append(keys, akey) + keys = append(keys, akey3) + + values, errors := d.GetEntries(&ts, keys) + t.Log("values: ", values) + t.Log("errors: ", errors) + 
+ if errors != nil && errors[2] != nil { + t.Log("Error received correctly for the key ", keys[2], "; error: ", errors[2]) + } else { + t.Log("Error not getting received for the key: ", keys[2]) + t.FailNow() + } + + deleteTableAndDb(d, &ts, t) +} + +// To test cache hit by enabling the PerConnection, CacheTables +func TestGetEntries4(t *testing.T) { + + var pid int = os.Getpid() + + d, e := newDB(ConfigDB) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + d.dbCacheConfig.PerConnection = true + d.dbCacheConfig.CacheTables[ts.Name] = true + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + ca2 := make([]string, 1, 1) + ca2[0] = "MyACL2_ACL_IPVNOTEXIST" + akey2 := Key{Comp: ca2} + + // Add the Entries for Get|DeleteKeys + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"ports@": "Ethernet1", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey2, avalue2) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + e = d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + // Add the Entries again for Table + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue2) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + d.dbStatsConfig.TableStats = true + d.dbStatsConfig.TimeStats = true + + keys = make([]Key, 0) + keys = append(keys, akey) + keys = append(keys, akey2) + + values, errors := d.GetEntries(&ts, keys) + t.Log("values: ", values) 
+ t.Log("errors: ", errors) + + stats := d.stats.Tables[ts.Name] + t.Log("stats.GetEntryCacheHits: ", stats.GetEntryCacheHits) + t.Log("stats.GetEntriesHits: ", stats.GetEntriesHits) + t.Log("stats.GetEntriesPeak: ", stats.GetEntriesPeak) + t.Log("stats.GetEntriesTime: ", stats.GetEntriesTime) + + values, errors = d.GetEntries(&ts, keys) + t.Log("values: ", values) + t.Log("errors: ", errors) + + stats = d.stats.Tables[ts.Name] + t.Log("stats.GetEntryCacheHits: ", stats.GetEntryCacheHits) + t.Log("stats.GetEntriesHits: ", stats.GetEntriesHits) + t.Log("stats.GetEntriesPeak: ", stats.GetEntriesPeak) + t.Log("stats.GetEntriesTime: ", stats.GetEntriesTime) + + if stats.GetEntryCacheHits != 2 || stats.GetEntriesHits != 2 { + t.FailNow() + } + + if stats.GetEntriesPeak == 0 || stats.GetEntriesTime == 0 { + t.FailNow() + } + + deleteTableAndDb(d, &ts, t) +} + +// To test cache hit by populating the one of the entry in the transaction map +func TestGetEntries5(t *testing.T) { + + var pid int = os.Getpid() + + d, e := newDB(ConfigDB) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + d.dbCacheConfig.PerConnection = true + d.dbCacheConfig.CacheTables[ts.Name] = true + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + ca2 := make([]string, 1, 1) + ca2[0] = "MyACL2_ACL_IPVNOTEXIST" + akey2 := Key{Comp: ca2} + + // Add the Entries for Get|DeleteKeys + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"ports@": "Ethernet1", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey2, avalue2) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + e 
= d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + // Add the Entries again for Table + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey2, avalue2) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + d.dbStatsConfig.TableStats = true + d.dbStatsConfig.TimeStats = true + + keys = make([]Key, 0) + keys = append(keys, akey) + keys = append(keys, akey2) + + values, errors := d.GetEntries(&ts, keys) + t.Log("values: ", values) + t.Log("errors: ", errors) + + stats := d.stats.Tables[ts.Name] + t.Log("stats.GetEntryCacheHits: ", stats.GetEntryCacheHits) + t.Log("stats.GetEntriesHits: ", stats.GetEntriesHits) + t.Log("stats.GetEntriesPeak: ", stats.GetEntriesPeak) + t.Log("stats.GetEntriesTime: ", stats.GetEntriesTime) + + d.txTsEntryMap = make(map[string]map[string]Value) + d.txTsEntryMap[ts.Name] = make(map[string]Value) + d.txTsEntryMap[ts.Name][d.key2redis(&ts, akey)] = avalue + + values, errors = d.GetEntries(&ts, keys) + t.Log("values: ", values) + t.Log("errors: ", errors) + + stats = d.stats.Tables[ts.Name] + t.Log("stats.GetEntryCacheHits: ", stats.GetEntryCacheHits) + t.Log("stats.GetEntriesHits: ", stats.GetEntriesHits) + t.Log("stats.GetEntriesPeak: ", stats.GetEntriesPeak) + t.Log("stats.GetEntriesTime: ", stats.GetEntriesTime) + + if stats.GetEntryCacheHits != 1 || stats.GetEntriesHits != 2 { + t.FailNow() + } + + if stats.GetEntriesPeak == 0 || stats.GetEntriesTime == 0 { + t.FailNow() + } + + deleteTableAndDb(d, &ts, t) +} diff --git a/translib/db/db_signal.go b/translib/db/db_signal.go new file mode 100644 index 000000000000..81cea6b62d1b --- /dev/null +++ b/translib/db/db_signal.go @@ -0,0 +1,79 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2020 Broadcom. 
The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + // "fmt" + // "strconv" + + // "errors" + // "strings" + + // "github.com/Azure/sonic-mgmt-common/cvl" + // "github.com/go-redis/redis/v7" + "os" + "os/signal" + "syscall" + // "github.com/golang/glog" + // "github.com/Azure/sonic-mgmt-common/translib/tlerr" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +func SignalHandler() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGUSR2) + go func() { + for { + s := <-sigs + if s == syscall.SIGUSR2 { + HandleSIGUSR2() + } + } + }() +} + +func HandleSIGUSR2() { + if dbCacheConfig != nil { + dbCacheConfig.handleReconfigureSignal() + } + + if dbStatsConfig != nil { + dbStatsConfig.handleReconfigureSignal() + } + + if dbRedisOptsConfig != nil { + dbRedisOptsConfig.handleReconfigureSignal() + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal 
Functions // +//////////////////////////////////////////////////////////////////////////////// + +func init() { + SignalHandler() +} diff --git a/translib/db/db_stats.go b/translib/db/db_stats.go new file mode 100644 index 000000000000..47a5c41a7053 --- /dev/null +++ b/translib/db/db_stats.go @@ -0,0 +1,552 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2020 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "reflect" + "sync" + "time" + + "github.com/go-redis/redis/v7" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +type Stats struct { + + // Total Hits + + Hits uint `json:"hits,omitempty"` + + // TimeStats are being collected (true) + + Time time.Duration `json:"total-time,omitempty"` + Peak time.Duration `json:"peak-time,omitempty"` + + // Category Hits + + GetEntryHits uint `json:"get-entry-hits,omitempty"` + GetKeysHits uint `json:"get-keys-hits,omitempty"` + GetKeysPatternHits uint `json:"get-keys-pattern-hits,omitempty"` + GetMapHits uint `json:"get-map-hits,omitempty"` + GetMapAllHits uint `json:"get-map-all-hits,omitempty"` + GetEntriesHits uint `json:"get-entries-hits,omitempty"` + + GetTablePatternHits uint `json:"get-table-pattern-hits,omitempty"` + ExistsKeyPatternHits uint `json:"exists-key-pattern-hits,omitempty"` + + NewScanCursorHits uint `json:"new-scan-cursor-hits,omitempty"` + DeleteScanCursorHits uint `json:"delete-scan-cursor-hits,omitempty"` + GetNextKeysHits uint `json:"get-next-keys-hits,omitempty"` + + // Cache Statistics + + GetEntryCacheHits uint `json:"get-entry-cache-hits,omitempty"` + GetKeysCacheHits uint `json:"keys-cache-hits,omitempty"` + GetKeysPatternCacheHits uint `json:"keys-pattern-cache-hits,omitempty"` + GetMapCacheHits uint `json:"get-map-cache-hits,omitempty"` + GetMapAllCacheHits uint `json:"get-map-all-cache-hits,omitempty"` + + GetTablePatternCacheHits uint `json:"get-table-pattern-cache-hits,omitempty"` + ExistsKeyPatternCacheHits uint `json:"exists-key-pattern-cache-hits,omitempty"` + + // TimeStats are being collected (true) + + GetEntryTime time.Duration `json:"get-entry-time,omitempty"` + GetKeysTime time.Duration `json:"get-keys-time,omitempty"` + 
GetKeysPatternTime time.Duration `json:"get-keys-pattern-time,omitempty"` + GetMapTime time.Duration `json:"get-map-time,omitempty"` + GetMapAllTime time.Duration `json:"get-map-all-time,omitempty"` + GetNextKeysTime time.Duration `json:"get-next-keys-time,omitempty"` + GetEntriesTime time.Duration `json:"get-entries-time,omitempty"` + + GetTablePatternTime time.Duration `json:"get-table-pattern-time,omitempty"` + ExistsKeyPatternTime time.Duration `json:"exists-key-pattern-time,omitempty"` + + GetEntryPeak time.Duration `json:"get-entry-peak-time,omitempty"` + GetKeysPeak time.Duration `json:"get-keys-peak-time,omitempty"` + GetKeysPatternPeak time.Duration `json:"get-keys-pattern-peak-time,omitempty"` + GetMapPeak time.Duration `json:"get-map-peak-time,omitempty"` + GetMapAllPeak time.Duration `json:"get-map-all-peak-time,omitempty"` + GetNextKeysPeak time.Duration `json:"get-next-keys-peak-time,omitempty"` + GetEntriesPeak time.Duration `json:"get-entries-peak-time,omitempty"` + + GetTablePatternPeak time.Duration `json:"get-table-pattern-peak-time,omitempty"` + ExistsKeyPatternPeak time.Duration `json:"exists-key-pattern-peak-time,omitempty"` + + // CAS Transaction Cmds Stats: Currently Only Used by GetStats() for + // the Candidate Configuration (CC) DB + // Running Totals (i.e. over several DB connections) are not maintained + // reliably. 
+ TxCmdsLen uint `json:"tx-cmds-len"` +} + +type DBStats struct { + Name string `json:"name"` + AllTables Stats `json:"all-tables"` + AllMaps Stats `json:"all-maps"` + Tables map[string]Stats `json:"tables,omitempty"` + Maps map[string]Stats `json:"maps,omitempty"` +} + +type DBGlobalStats struct { + New uint `json:"new-db"` + Delete uint `json:"delete-db"` + PeakOpen uint `json:"peak-open"` + + NewTime time.Duration `json:"new-time,omitempty"` + NewPeak time.Duration `json:"peak-new-time,omitempty"` + + ZeroGetHits uint `json:"zero-get-ops-db"` + + // TableStats are being collected (true) + + Databases []DBStats `json:"dbs,omitempty"` +} + +type DBStatsConfig struct { + TimeStats bool + TableStats bool + MapStats bool +} + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +func GetDBStats() (*DBGlobalStats, error) { + return dbGlobalStats.getStats() +} + +func GetDBStatsTotals() (uint, time.Duration, time.Duration) { + return dbGlobalStats.getStatsTotals() +} + +func ClearDBStats() error { + return dbGlobalStats.clearStats() +} + +func ReconfigureStats() error { + return dbStatsConfig.reconfigure() +} + +// GetStats primarily returns CAS Transaction Cmds list length in AllTables +// The TxCmdsLen is always in the ret.AllTables.TxCmdsLen +func (d *DB) GetStats() *DBStats { + if d == nil { + return &DBStats{} + } + return &(d.stats) +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// + +var dbGlobalStats *DBGlobalStats +var mutexDBGlobalStats sync.Mutex + +var dbStatsConfig *DBStatsConfig +var defaultDBStatsConfig DBStatsConfig = DBStatsConfig{ + TimeStats: false, + TableStats: false, + MapStats: false, +} + +var reconfigureStatsConfig bool +var mutexStatsConfig sync.Mutex + +func 
init() {
+
+	dbGlobalStats = &DBGlobalStats{Databases: make([]DBStats, MaxDB)}
+
+	dbStatsConfig = &DBStatsConfig{}
+	dbStatsConfig.handleReconfigureSignal()
+	dbStatsConfig.reconfigure()
+
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//  DBGlobalStats functions                                                   //
+////////////////////////////////////////////////////////////////////////////////
+
+func (stats *DBGlobalStats) getStats() (*DBGlobalStats, error) {
+
+	// Need to give a (deep)copy of the Stats
+	var dbGlobalStats DBGlobalStats
+
+	mutexDBGlobalStats.Lock()
+
+	dbGlobalStats = *stats; dbGlobalStats.Databases = make([]DBStats, len(stats.Databases)); copy(dbGlobalStats.Databases, stats.Databases)
+	for dbnum, db := range stats.Databases {
+		dbGlobalStats.Databases[dbnum].Name = DBNum(dbnum).String()
+
+		dbGlobalStats.Databases[dbnum].Tables = make(map[string]Stats, len(db.Tables))
+		for name, table := range db.Tables {
+			dbGlobalStats.Databases[dbnum].Tables[name] = table
+		}
+
+		dbGlobalStats.Databases[dbnum].Maps = make(map[string]Stats, len(db.Maps))
+		for name, mAP := range db.Maps {
+			dbGlobalStats.Databases[dbnum].Maps[name] = mAP
+		}
+	}
+
+	mutexDBGlobalStats.Unlock()
+
+	return &dbGlobalStats, nil
+}
+
+func (stats *DBGlobalStats) getStatsTotals() (uint, time.Duration, time.Duration) {
+	var hits uint
+	var timetotal, peak time.Duration
+
+	mutexDBGlobalStats.Lock()
+
+	for _, db := range stats.Databases {
+
+		if db.AllTables.Hits != 0 {
+			hits += db.AllTables.Hits
+			timetotal += db.AllTables.Time
+			if peak < db.AllTables.Peak {
+				peak = db.AllTables.Peak
+			}
+		} else {
+			for _, table := range db.Tables {
+				hits += table.Hits
+				timetotal += table.Time
+				if peak < table.Peak {
+					peak = table.Peak
+				}
+			}
+		}
+
+		if db.AllMaps.Hits != 0 {
+			hits += db.AllMaps.Hits
+			timetotal += db.AllMaps.Time
+			if peak < db.AllMaps.Peak {
+				peak = db.AllMaps.Peak
+			}
+		} else {
+			for _, mAP := range db.Maps {
+				hits += mAP.Hits
+				timetotal += mAP.Time
+				if peak < mAP.Peak {
+					peak = mAP.Peak
+				}
+			}
+		}
+
+	}
+
+	mutexDBGlobalStats.Unlock()
+
+	return hits, timetotal, peak
+}
+
+func (stats *DBGlobalStats) clearStats() error { + + mutexDBGlobalStats.Lock() + *stats = DBGlobalStats{Databases: make([]DBStats, MaxDB)} + mutexDBGlobalStats.Unlock() + + return nil +} + +func (stats *DBGlobalStats) updateStats(dbNo DBNum, isNew bool, dur time.Duration, connStats *DBStats) error { + + mutexDBGlobalStats.Lock() + + if isNew { + stats.NewTime += dur + if dur > stats.NewPeak { + stats.NewPeak = dur + } + if (stats.New)++; (stats.New - stats.Delete) > stats.PeakOpen { + (stats.PeakOpen)++ + } + } else { + (stats.Delete)++ + if (connStats.AllTables.Hits == 0) && (connStats.AllMaps.Hits == 0) && + (len(connStats.Tables) == 0) && (len(connStats.Maps) == 0) { + (stats.ZeroGetHits)++ + } else { + stats.Databases[dbNo].updateStats(connStats) + } + } + + mutexDBGlobalStats.Unlock() + + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// DBStats functions // +//////////////////////////////////////////////////////////////////////////////// + +func (dbstats *DBStats) Empty() bool { + return dbstats.AllTables.Hits == 0 && + dbstats.AllMaps.Hits == 0 && + len(dbstats.Tables) == 0 && + len(dbstats.Maps) == 0 +} + +func (dbstats *DBStats) updateStats(connStats *DBStats) error { + + var ok bool + + if connStats.AllTables.Hits != 0 { + dbstats.AllTables.updateStats(&(connStats.AllTables)) + } else { + if dbstats.Tables == nil { + dbstats.Tables = make(map[string]Stats, InitialTablesCount) + } + for t, s := range connStats.Tables { + if _, ok = dbstats.Tables[t]; !ok { + dbstats.Tables[t] = s + } else { + var stats Stats = dbstats.Tables[t] + stats.updateStats(&s) + dbstats.Tables[t] = stats + } + } + } + + if connStats.AllMaps.Hits != 0 { + dbstats.AllMaps.updateStats(&(connStats.AllMaps)) + } else { + if dbstats.Maps == nil { + dbstats.Maps = make(map[string]Stats, InitialMapsCount) + } + for t, s := range connStats.Maps { + if _, ok = dbstats.Maps[t]; !ok { + dbstats.Maps[t] = s + } else { + var stats Stats = 
dbstats.Maps[t] + stats.updateStats(&s) + dbstats.Maps[t] = stats + } + } + } + + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Stats functions // +//////////////////////////////////////////////////////////////////////////////// + +func (stats *Stats) updateStats(connStats *Stats) error { + + if connStats.Hits != 0 { + + stats.Hits += connStats.Hits + + stats.GetEntryHits += connStats.GetEntryHits + stats.GetKeysHits += connStats.GetKeysHits + stats.GetKeysPatternHits += connStats.GetKeysPatternHits + stats.GetMapHits += connStats.GetMapHits + stats.GetMapAllHits += connStats.GetMapAllHits + stats.GetEntriesHits += connStats.GetEntriesHits + + stats.GetTablePatternHits += connStats.GetTablePatternHits + stats.ExistsKeyPatternHits += connStats.ExistsKeyPatternHits + + stats.NewScanCursorHits += connStats.NewScanCursorHits + stats.DeleteScanCursorHits += connStats.DeleteScanCursorHits + stats.GetNextKeysHits += connStats.GetNextKeysHits + + stats.GetEntryCacheHits += connStats.GetEntryCacheHits + stats.GetKeysCacheHits += connStats.GetKeysCacheHits + stats.GetKeysPatternCacheHits += connStats.GetKeysPatternCacheHits + stats.GetMapCacheHits += connStats.GetMapCacheHits + stats.GetMapAllCacheHits += connStats.GetMapAllCacheHits + + stats.GetTablePatternCacheHits += connStats.GetTablePatternCacheHits + stats.ExistsKeyPatternCacheHits += connStats.ExistsKeyPatternCacheHits + + if connStats.Time != 0 { + + stats.Time += connStats.Time + if connStats.Peak > stats.Peak { + stats.Peak = connStats.Peak + } + + stats.GetEntryTime += connStats.GetEntryTime + stats.GetKeysTime += connStats.GetKeysTime + stats.GetKeysPatternTime += connStats.GetKeysPatternTime + stats.GetMapTime += connStats.GetMapTime + stats.GetMapAllTime += connStats.GetMapAllTime + stats.GetEntriesTime += connStats.GetEntriesTime + stats.GetNextKeysTime += connStats.GetNextKeysTime + + stats.GetTablePatternTime += connStats.GetTablePatternTime + 
stats.ExistsKeyPatternTime += connStats.ExistsKeyPatternTime
+
+			if connStats.GetEntryPeak > stats.GetEntryPeak {
+				stats.GetEntryPeak = connStats.GetEntryPeak
+			}
+			if connStats.GetKeysPeak > stats.GetKeysPeak {
+				stats.GetKeysPeak = connStats.GetKeysPeak
+			}
+			if connStats.GetKeysPatternPeak > stats.GetKeysPatternPeak {
+				stats.GetKeysPatternPeak = connStats.GetKeysPatternPeak
+			}
+			if connStats.GetMapPeak > stats.GetMapPeak {
+				stats.GetMapPeak = connStats.GetMapPeak
+			}
+			if connStats.GetMapAllPeak > stats.GetMapAllPeak {
+				stats.GetMapAllPeak = connStats.GetMapAllPeak
+			}
+			if connStats.GetEntriesPeak > stats.GetEntriesPeak {
+				stats.GetEntriesPeak = connStats.GetEntriesPeak
+			}
+			if connStats.GetNextKeysPeak > stats.GetNextKeysPeak {
+				stats.GetNextKeysPeak = connStats.GetNextKeysPeak
+			}
+
+			if connStats.GetTablePatternPeak > stats.GetTablePatternPeak {
+				stats.GetTablePatternPeak = connStats.GetTablePatternPeak
+			}
+
+			if connStats.ExistsKeyPatternPeak > stats.ExistsKeyPatternPeak {
+				stats.ExistsKeyPatternPeak = connStats.ExistsKeyPatternPeak
+			}
+
+		}
+
+	}
+
+	stats.TxCmdsLen += connStats.TxCmdsLen
+
+	return nil
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//  Configure DB Stats                                                        //
+////////////////////////////////////////////////////////////////////////////////
+
+func getDBStatsConfig() DBStatsConfig {
+	dbStatsConfig.reconfigure()
+	mutexStatsConfig.Lock()
+	statsConfig := *dbStatsConfig
+	mutexStatsConfig.Unlock()
+	return statsConfig
+}
+
+func (config *DBStatsConfig) reconfigure() error {
+	mutexStatsConfig.Lock()
+	var doReconfigure bool = reconfigureStatsConfig
+	if reconfigureStatsConfig {
+		reconfigureStatsConfig = false
+	}
+	mutexStatsConfig.Unlock()
+
+	if doReconfigure {
+		var readDBStatsConfig DBStatsConfig
+		readDBStatsConfig.readFromDB()
+
+		mutexStatsConfig.Lock()
+		configChanged := !reflect.DeepEqual(*config, readDBStatsConfig)
+		mutexStatsConfig.Unlock()
+
+		if configChanged {
+			
ClearDBStats() + } + + mutexStatsConfig.Lock() + dbStatsConfig = &readDBStatsConfig + mutexStatsConfig.Unlock() + } + return nil +} + +func (config *DBStatsConfig) handleReconfigureSignal() error { + mutexStatsConfig.Lock() + reconfigureStatsConfig = true + mutexStatsConfig.Unlock() + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Read DB Stats Configuration // +//////////////////////////////////////////////////////////////////////////////// + +func (config *DBStatsConfig) readFromDB() error { + fields, e := readRedis("TRANSLIB_DB|default") + if e != nil { + config.TimeStats = defaultDBStatsConfig.TimeStats + config.TableStats = defaultDBStatsConfig.TableStats + config.MapStats = defaultDBStatsConfig.MapStats + } else { + for k, v := range fields { + switch { + case k == "time_stats" && v == "True": + config.TimeStats = true + case k == "time_stats" && v == "False": + config.TimeStats = false + case k == "table_stats" && v == "True": + config.TableStats = true + case k == "table_stats" && v == "False": + config.TableStats = false + case k == "map_stats" && v == "True": + config.MapStats = true + case k == "map_stats" && v == "False": + config.MapStats = false + } + } + } + return e +} + +//////////////////////////////////////////////////////////////////////////////// +// Utility Function to read Redis DB // +//////////////////////////////////////////////////////////////////////////////// + +func readRedis(key string) (map[string]string, error) { + + ipAddr := DefaultRedisLocalTCPEP + dbId := int(ConfigDB) + dbPassword := "" + if dbInstName := getDBInstName(ConfigDB); dbInstName != "" { + if isDbInstPresent(dbInstName) { + ipAddr = getDbTcpAddr(dbInstName) + dbId = getDbId(dbInstName) + dbPassword = getDbPassword(dbInstName) + } + } + + client := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: ipAddr, + Password: dbPassword, + DB: dbId, + DialTimeout: 0, + PoolSize: 1, + }) + + fields, e := 
client.HGetAll(key).Result() + + client.Close() + + return fields, e +} diff --git a/translib/db/db_table.go b/translib/db/db_table.go new file mode 100644 index 000000000000..1c3072b77395 --- /dev/null +++ b/translib/db/db_table.go @@ -0,0 +1,184 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2020 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "github.com/golang/glog" + + "github.com/Azure/sonic-mgmt-common/translib/tlerr" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +// Table gives the entire table as a map. 
+// (Eg: { ts: &TableSpec{ Name: "ACL_TABLE" }, +// entry: map[string]Value { +// "ACL_TABLE|acl1|rule1_1": Value { +// Field: map[string]string { +// "type" : "l3v6", "ports" : "Ethernet0", +// } +// }, +// "ACL_TABLE|acl1|rule1_2": Value { +// Field: map[string]string { +// "type" : "l3v6", "ports" : "eth0", +// } +// }, +// } +// }) + +type Table struct { + ts *TableSpec + entry map[string]Value + complete bool + patterns map[string][]Key + db *DB +} + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +// GetTable gets the entire table. +func (d *DB) GetTable(ts *TableSpec) (Table, error) { + if glog.V(3) { + glog.Info("GetTable: Begin: ts: ", ts) + } + + if (d == nil) || (d.client == nil) { + return Table{}, tlerr.TranslibDBConnectionReset{} + } + + /* + table := Table{ + ts: ts, + entry: map[string]Value{ + "table1|k0.0|k0.1": Value{ + map[string]string{ + "f0.0": "v0.0", + "f0.1": "v0.1", + "f0.2": "v0.2", + }, + }, + "table1|k1.0|k1.1": Value{ + map[string]string{ + "f1.0": "v1.0", + "f1.1": "v1.1", + "f1.2": "v1.2", + }, + }, + }, + db: d, + } + */ + + // Create Table + table := Table{ + ts: ts, + entry: make(map[string]Value, InitialTableEntryCount), + complete: true, + patterns: make(map[string][]Key, InitialTablePatternCount), + db: d, + } + + // This can be done via a LUA script as well. For now do this. 
TBD + // Read Keys + keys, e := d.GetKeys(ts) + if e != nil { + glog.Error("GetTable: GetKeys: " + e.Error()) + table = Table{} + goto GetTableExit + } + + table.patterns[d.key2redis(ts, Key{Comp: []string{"*"}})] = keys + + // For each key in Keys + // Add Value into table.entry[key)] + for i := 0; i < len(keys); i++ { + value, e := d.GetEntry(ts, keys[i]) + if e != nil { + glog.Warning("GetTable: GetKeys: ", d.Name(), + ": ", ts.Name, ": ", e.Error()) + value = Value{} + e = nil + } + table.entry[d.key2redis(ts, keys[i])] = value + } + + // Mark Per Connection Cache table as complete. + if (d.dbCacheConfig.PerConnection && + d.dbCacheConfig.isCacheTable(ts.Name)) || + (d.Opts.IsOnChangeEnabled && d.onCReg.isCacheTable(ts.Name)) { + if cTable, ok := d.cache.Tables[ts.Name]; ok { + cTable.complete = true + } + } + +GetTableExit: + + if glog.V(3) { + glog.Info("GetTable: End: table: ", table) + } + return table, e +} + +// GetKeys method retrieves all entry/row keys from a previously read table. +func (t *Table) GetKeys() ([]Key, error) { + if glog.V(3) { + glog.Info("Table.GetKeys: Begin: t: ", t) + } + + keys := make([]Key, 0, len(t.entry)) + for k := range t.entry { + keys = append(keys, t.db.redis2key(t.ts, k)) + } + + if glog.V(3) { + glog.Info("Table.GetKeys: End: keys: ", keys) + } + return keys, nil +} + +// GetEntry method retrieves an entry/row from a previously read table. 
+func (t *Table) GetEntry(key Key) (Value, error) { + /* + return Value{map[string]string{ + "f0.0": "v0.0", + "f0.1": "v0.1", + "f0.2": "v0.2", + }, + }, nil + */ + if glog.V(3) { + glog.Info("Table.GetEntry: Begin: t: ", t, " key: ", key) + } + + v := t.entry[t.db.key2redis(t.ts, key)] + + if glog.V(3) { + glog.Info("Table.GetEntry: End: entry: ", v) + } + return v, nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// diff --git a/translib/db/db_test.go b/translib/db/db_test.go index 2c65e67f1fd2..17d10316d788 100644 --- a/translib/db/db_test.go +++ b/translib/db/db_test.go @@ -19,18 +19,15 @@ package db - import ( - // "fmt" - // "errors" - // "flag" - // "github.com/golang/glog" - "time" - "io/ioutil" + "fmt" "os" - "testing" - "strconv" "reflect" + "strconv" + "testing" + "time" + + "github.com/go-redis/redis/v7" ) var dbConfig = ` @@ -107,65 +104,124 @@ var dbConfig = ` } ` +// "TEST_" prefix is used by a lot of DB Tests. Avoid it. +const DBPAT_TST_PREFIX string = "DBPAT_TST" -func TestMain(m * testing.M) { - - exitCode := 0 +var ts TableSpec = TableSpec{ + Name: DBPAT_TST_PREFIX + strconv.FormatInt(int64(os.Getpid()), 10), +} +var db *DB +var dbOnC *DB + +func newReadOnlyDB(dBNum DBNum) (*DB, error) { + d, e := NewDB(Options{ + DBNo: dBNum, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + IsWriteDisabled: true, + }) + return d, e +} -/* Apparently, on an actual switch the swss container will have - * a redis-server running, which will be in a different container than - * mgmt, thus this pkill stuff to find out it is running will not work. 
- * +func newOnCDB(dBNum DBNum) (*DB, error) { + d, e := NewDB(Options{ + DBNo: dBNum, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + IsWriteDisabled: true, + IsOnChangeEnabled: true, + }) + return d, e +} - redisServerAttemptedStart := false +// setupTestData populates given test entries in db and deletes all those keys +// whne the test case ends. +func setupTestData(t *testing.T, redis *redis.Client, data map[string]map[string]interface{}) { + keys := make([]string, 0, len(data)) + t.Cleanup(func() { redis.Del(keys...) }) + for k, v := range data { + keys = append(keys, k) + if _, err := redis.HMSet(k, v).Result(); err != nil { + t.Fatalf("HMSET %s failed; err=%v", k, err) + } + } +} -TestMainRedo: - o, e := exec.Command("/usr/bin/pkill", "-HUP", "redis-server").Output() +func testTableSetup(tableEntries int) { + var err error + db, err = newDB(ConfigDB) + if err != nil { + fmt.Printf("newDB() fails err = %v\n", err) + return + } - if e == nil { + for i := 0; i < tableEntries; i++ { + e := db.SetEntry(&ts, + Key{Comp: []string{"KEY" + strconv.FormatInt(int64(i), 10)}}, + Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}}) + if e != nil { + fmt.Printf("SetEntry() fails e = %v\n", e) + return + } + } - } else if redisServerAttemptedStart { + db.DeleteDB() + db, err = newReadOnlyDB(ConfigDB) + if err != nil { + fmt.Printf("newReadOnlyDB() fails err = %v\n", err) + return + } - exitCode = 1 + dbOnC, err = newOnCDB(ConfigDB) + if err != nil { + fmt.Printf("newDB() for OnC fails err = %v\n", err) + return + } - } else { +} - fmt.Printf("TestMain: No redis server: pkill: %v\n", o) - fmt.Println("TestMain: Starting redis-server") - e = exec.Command("/tools/bin/redis-server").Start() - time.Sleep(3 * time.Second) - redisServerAttemptedStart = true - goto TestMainRedo +func testTableTearDown(tableEntries int) { + var err error + if db != nil { + db.DeleteDB() } -*/ - - // Create Temporary DB Config File - 
dbContent := []byte(dbConfig) - dbFile, e := ioutil.TempFile("/tmp", "dbConfig") - if e != nil { - exitCode = 1 - } else { - defer os.Remove(dbFile.Name()) + db, err = newDB(ConfigDB) + if err != nil { + fmt.Printf("newDB() fails err = %v\n", err) + return } - if _,e := dbFile.Write(dbContent); e != nil { - exitCode = 2 + for i := 0; i < tableEntries; i++ { + e := db.DeleteEntry(&ts, + Key{Comp: []string{"KEY" + strconv.FormatInt(int64(i), 10)}}) + if e != nil { + fmt.Printf("DeleteEntry() fails e = %v", e) + return + } } - if e := dbFile.Close(); e != nil { - exitCode = 3 - } + db.DeleteDB() + + dbOnC.DeleteDB() + +} + +func TestMain(m *testing.M) { - // Set the environment variable to it - os.Setenv("DB_CONFIG_PATH", dbFile.Name()) + exitCode := 0 + testTableSetup(100) if exitCode == 0 { exitCode = m.Run() } - + testTableTearDown(100) os.Exit(exitCode) - + } /* @@ -174,24 +230,23 @@ TestMainRedo: */ -func TestNewDB(t * testing.T) { +func TestNewDB(t *testing.T) { - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) if d == nil { t.Errorf("NewDB() fails e = %v", e) - } else if e = d.DeleteDB() ; e != nil { + } else if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } - /* 2. 
Get an entry (GetEntry()) @@ -202,30 +257,30 @@ func TestNewDB(t * testing.T) { */ -func TestNoTransaction(t * testing.T) { +func TestNoTransaction(t *testing.T) { var pid int = os.Getpid() - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) if d == nil { t.Errorf("NewDB() fails e = %v", e) return } - ts := TableSpec { Name: "TEST_" + strconv.FormatInt(int64(pid), 10) } + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} ca := make([]string, 1, 1) ca[0] = "MyACL1_ACL_IPVNOTEXIST" - akey := Key { Comp: ca} - avalue := Value { map[string]string {"ports@":"Ethernet0","type":"MIRROR" }} - e = d.SetEntry(&ts, akey, avalue) + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) @@ -234,12 +289,12 @@ func TestNoTransaction(t * testing.T) { v, e := d.GetEntry(&ts, akey) - if (e != nil) || (!reflect.DeepEqual(v,avalue)) { + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { t.Errorf("GetEntry() fails e = %v", e) return } - e = d.DeleteEntry(&ts, akey) + e = d.DeleteEntry(&ts, akey) if e != nil { t.Errorf("DeleteEntry() fails e = %v", e) @@ -253,12 +308,11 @@ func TestNoTransaction(t * testing.T) { return } - if e = d.DeleteDB() ; e != nil { + if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } - /* 5. 
Get a Table (GetTable()) @@ -269,43 +323,43 @@ func TestNoTransaction(t * testing.T) { */ -func TestTable(t * testing.T) { +func TestTable(t *testing.T) { var pid int = os.Getpid() - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) if d == nil { t.Errorf("NewDB() fails e = %v", e) return } - ts := TableSpec { Name: "TEST_" + strconv.FormatInt(int64(pid), 10) } + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} ca := make([]string, 1, 1) ca[0] = "MyACL1_ACL_IPVNOTEXIST" - akey := Key { Comp: ca} - avalue := Value { map[string]string {"ports@":"Ethernet0","type":"MIRROR" }} + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} ca2 := make([]string, 1, 1) ca2[0] = "MyACL2_ACL_IPVNOTEXIST" - akey2 := Key { Comp: ca2} + akey2 := Key{Comp: ca2} - // Add the Entries for Get|DeleteKeys + // Add the Entries for Get|DeleteKeys - e = d.SetEntry(&ts, akey, avalue) + e = d.SetEntry(&ts, akey, avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) return } - e = d.SetEntry(&ts, akey2, avalue) + e = d.SetEntry(&ts, akey2, avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) @@ -319,7 +373,7 @@ func TestTable(t * testing.T) { return } - e = d.DeleteKeys(&ts, Key {Comp: []string {"MyACL*_ACL_IPVNOTEXIST"}}) + e = d.DeleteKeys(&ts, Key{Comp: []string{"MyACL*_ACL_IPVNOTEXIST"}}) if e != nil { t.Errorf("DeleteKeys() fails e = %v", e) @@ -333,18 +387,16 @@ func TestTable(t * testing.T) { return } + // Add the Entries again for Table - - // Add the Entries again for Table - - e = d.SetEntry(&ts, akey, avalue) + e = d.SetEntry(&ts, akey, avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) return } - e = d.SetEntry(&ts, akey2, avalue) + e = d.SetEntry(&ts, akey2, 
avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) @@ -360,7 +412,7 @@ func TestTable(t * testing.T) { v, e = tab.GetEntry(akey) - if (e != nil) || (!reflect.DeepEqual(v,avalue)) { + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { t.Errorf("Table.GetEntry() fails e = %v", e) return } @@ -379,13 +431,12 @@ func TestTable(t * testing.T) { return } - if e = d.DeleteDB() ; e != nil { + if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } - -/* Tests for +/* Tests for 6. Set an entry with Transaction (StartTx(), SetEntry(), CommitTx()) 7. Delete an entry with Transaction (StartTx(), DeleteEntry(), CommitTx()) @@ -403,10 +454,10 @@ func TestTable(t * testing.T) { Cannot Automate 19 for now 19. NT: Check V logs, Error logs - */ +*/ -func TestTransaction(t * testing.T) { - for transRun := TransRunBasic ; transRun < TransRunEnd ; transRun++ { +func TestTransaction(t *testing.T) { + for transRun := TransRunBasic; transRun < TransRunEnd; transRun++ { testTransaction(t, transRun) } } @@ -414,291 +465,1179 @@ func TestTransaction(t * testing.T) { type TransRun int const ( - TransRunBasic TransRun = iota // 0 - TransRunWatchKeys // 1 - TransRunTable // 2 - TransRunWatchKeysAndTable // 3 - TransRunEmptyWatchKeysAndTable // 4 - TransRunFailWatchKeys // 5 - TransRunFailTable // 6 + TransRunBasic TransRun = iota // 0 + TransRunWatchKeys // 1 + TransRunTable // 2 + TransRunWatchKeysAndTable // 3 + TransRunEmptyWatchKeysAndTable // 4 + TransRunFailWatchKeys // 5 + TransRunFailTable // 6 // Nothing after this. 
TransRunEnd ) -func testTransaction(t * testing.T, transRun TransRun) { +const ( + TransCacheRunGetAfterCreate TransRun = iota // 0 + TransCacheRunGetAfterSingleSet // 1 + TransCacheRunGetAfterMultiSet // 2 + TransCacheRunGetAfterMod // 3 + TransCacheRunGetAfterDelEntry // 4 + TransCacheRunGetAfterDelField // 5 + TransCacheRunGetWithInvalidKey // 6 + TransCacheGetKeysAfterSetAndDeleteKeys // 7 + TransCacheGetKeysWithoutSet // 8 + TransCacheDelEntryEmpty // 9 + TransCacheDelFieldsEmpty // 10 + + // Nothing after this. + TransCacheRunEnd +) + +func TestTransactionCache(t *testing.T) { + // Tests without any data pre-existing in DB + for transRun := TransCacheRunGetAfterCreate; transRun <= TransCacheRunEnd; transRun++ { + testTransactionCache(t, transRun) + } +} + +//TestTransactionCacheWithDBContentKeysPattern +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetKeysPattern checks for number of required required +2. DeleteEntry and then GetKeysPattern, checks for number of required required +*/ +func TestTransactionCacheWithDBContentKeysPattern(t *testing.T) { var pid int = os.Getpid() - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) if d == nil { - t.Errorf("NewDB() fails e = %v, transRun = %v", e, transRun) + t.Errorf("NewDB() fails e = %v", e) return } - ts := TableSpec { Name: "TEST_" + strconv.FormatInt(int64(pid), 10) } - - ca := make([]string, 1, 1) - ca[0] = "MyACL1_ACL_IPVNOTEXIST" - akey := Key { Comp: ca} - avalue := Value { map[string]string {"ports@":"Ethernet0","type":"MIRROR" }} + e = d.StartTx(nil, nil) - var watchKeys []WatchKeys - var table []*TableSpec + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} - switch transRun { - case 
TransRunBasic, TransRunWatchKeysAndTable: - watchKeys = []WatchKeys{{Ts: &ts, Key: &akey}} - table = []*TableSpec { &ts } - case TransRunWatchKeys, TransRunFailWatchKeys: - watchKeys = []WatchKeys{{Ts: &ts, Key: &akey}} - table = []*TableSpec { } - case TransRunTable, TransRunFailTable: - watchKeys = []WatchKeys{} - table = []*TableSpec { &ts } + ca := make([]string, 1, 1) + ca[0] = "DUMMY_ACL_1" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return } - - e = d.StartTx(watchKeys, table) + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() if e != nil { - t.Errorf("StartTx() fails e = %v", e) + t.Errorf("CommitTx() fails e = %v", e) return } + e = d.StartTx(nil, nil) + keys, e := d.GetKeysPattern(&ts, Key{Comp: []string{"DUMMY_ACL_*"}}) - e = d.SetEntry(&ts, akey, avalue) - + if (e != nil) || (len(keys) != 1) || (!keys[0].Equals(akey)) { + t.Errorf("GetKeysPattern() fails e = %v", e) + return + } + ca[0] = "DUMMY_ACL_2" + akey = Key{Comp: ca} + e = d.SetEntry(&ts, akey, avalue) if e != nil { t.Errorf("SetEntry() fails e = %v", e) return } + keys, e = d.GetKeysPattern(&ts, Key{Comp: []string{"DUMMY_ACL_*"}}) - e = d.CommitTx() + if (e != nil) || (len(keys) != 2) { + t.Errorf("GetKeysPattern() fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + keys, e = d.GetKeysPattern(&ts, Key{Comp: []string{"DUMMY_ACL_*"}}) + if (e != nil) || (len(keys) != 1) { + t.Errorf("GetKeysPattern() fails e = %v", e) + return + } + ca[0] = "DUMMY_ACL_1" + akey = Key{Comp: ca} + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + e = d.CommitTx() if e != nil { t.Errorf("CommitTx() fails e = %v", e) 
return } - v, e := d.GetEntry(&ts, akey) + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} - if (e != nil) || (!reflect.DeepEqual(v,avalue)) { - t.Errorf("GetEntry() after Tx fails e = %v", e) +//TestTransactionCacheMultiKeysPattern +/* +1. Sets a Table entry with multikey +2. Performs GetEntry, GetKeysPattern and GetKeysByPattern +3. Deletes an entry +4. Re-Performs GetEntry and GetKeysPattern +*/ +func TestTransactionCacheMultiKeysPattern(t *testing.T) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) return } - e = d.StartTx(watchKeys, table) + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + ca := make([]string, 2, 2) + ca[0] = "Vlan10" + ca[1] = "Ethernet0" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + + e = d.StartTx(nil, nil) if e != nil { t.Errorf("StartTx() fails e = %v", e) return } - e = d.DeleteEntry(&ts, akey) - + e = d.SetEntry(&ts, akey, avalue) if e != nil { - t.Errorf("DeleteEntry() fails e = %v", e) + t.Errorf("SetEntry() fails e = %v", e) return } - - e = d.AbortTx() - - if e != nil { - t.Errorf("AbortTx() fails e = %v", e) + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) return } - v, e = d.GetEntry(&ts, akey) + keys, e := d.GetKeysPattern(&ts, Key{Comp: []string{"*", "*Ethernet0"}}) - if (e != nil) || (!reflect.DeepEqual(v,avalue)) { - t.Errorf("GetEntry() after Abort Tx fails e = %v", e) + if (e != nil) || (len(keys) != 1) || (!keys[0].Equals(akey)) { + t.Errorf("GetKeysPattern() fails e = %v", e) return } - e = d.StartTx(watchKeys, table) + keys, e = d.GetKeysByPattern(&ts, "*Ethernet0") - if e != nil { - t.Errorf("StartTx() fails e = %v", e) + if (e != nil) || (len(keys) 
!= 1) || (!keys[0].Equals(akey)) { + t.Errorf("GetKeysPattern() fails e = %v", e) return } - e = d.DeleteEntry(&ts, akey) - + e = d.DeleteEntry(&ts, akey) if e != nil { t.Errorf("DeleteEntry() fails e = %v", e) return } - - switch transRun { - case TransRunFailWatchKeys, TransRunFailTable: - d2,_ := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) - - d2.StartTx(watchKeys, table); - d2.DeleteEntry(&ts, akey) - d2.CommitTx(); - d2.DeleteDB(); - default: + v, e = d.GetEntry(&ts, akey) + if e == nil { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return } - e = d.CommitTx() - - switch transRun { - case TransRunFailWatchKeys, TransRunFailTable: - if e == nil { - t.Errorf("NT CommitTx() tr: %v fails e = %v", - transRun, e) - return - } - default: - if e != nil { - t.Errorf("CommitTx() fails e = %v", e) - return - } + keys, e = d.GetKeysPattern(&ts, Key{Comp: []string{"*", "*Ethernet0"}}) + if (e != nil) || (len(keys) != 0) { + t.Errorf("GetKeysPattern() fails e = %v", e) + return } - v, e = d.GetEntry(&ts, akey) + keys, e = d.GetKeysByPattern(&ts, "*Ethernet0") - if e == nil { - t.Errorf("GetEntry() after Tx DeleteEntry() fails e = %v", e) + if (e != nil) || (len(keys) != 0) { + t.Errorf("GetKeysPattern() fails e = %v", e) return } - d.DeleteMapAll(&ts) + e = d.CommitTx() + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } - if e = d.DeleteDB() ; e != nil { + if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } - -func TestMap(t * testing.T) { +//TestTransactionCacheWithDBContentKeys +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetKeys checks for number of required required +2. 
DeleteEntry and then GetKeys, checks for number of required required +*/ +func TestTransactionCacheWithDBContentKeys(t *testing.T) { var pid int = os.Getpid() - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) if d == nil { t.Errorf("NewDB() fails e = %v", e) return } - ts := TableSpec { Name: "TESTMAP_" + strconv.FormatInt(int64(pid), 10) } + e = d.StartTx(nil, nil) - d.SetMap(&ts, "k1", "v1"); - d.SetMap(&ts, "k2", "v2"); + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} - if v, e := d.GetMap(&ts, "k1"); v != "v1" { - t.Errorf("GetMap() fails e = %v", e) + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) return } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() - if v, e := d.GetMapAll(&ts) ; - (e != nil) || - (!reflect.DeepEqual(v, - Value{ Field: map[string]string { - "k1" : "v1", "k2" : "v2" }})) { - t.Errorf("GetMapAll() fails e = %v", e) + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) return } + e = d.StartTx(nil, nil) + keys, e := d.GetKeys(&ts) //DB get verify - d.DeleteMapAll(&ts) + if (e != nil) || (len(keys) != 1) || (!keys[0].Equals(akey)) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + keys, e = d.GetKeys(&ts) //Cache get verify + + if (e != nil) || (len(keys) != 0) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + e = d.CommitTx() + if e != nil { + 
t.Errorf("CommitTx() fails e = %v", e) + return + } - if e = d.DeleteDB() ; e != nil { + if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } -func TestSubscribe(t * testing.T) { +//TestTransactionCacheWithDBContentDel +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetEntry +2. DeleteEntry and then GetEntry +*/ +func TestTransactionCacheWithDBContentDel(t *testing.T) { var pid int = os.Getpid() - var hSetCalled, hDelCalled, delCalled bool - - d,e := NewDB(Options { - DBNo : ConfigDB, - InitIndicator : "", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }) + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) - if (d == nil) || (e != nil) { + if d == nil { t.Errorf("NewDB() fails e = %v", e) return } - ts := TableSpec { Name: "TEST_" + strconv.FormatInt(int64(pid), 10) } + e = d.StartTx(nil, nil) + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} ca := make([]string, 1, 1) ca[0] = "MyACL1_ACL_IPVNOTEXIST" - akey := Key { Comp: ca} - avalue := Value { map[string]string {"ports@":"Ethernet0","type":"MIRROR" }} - - var skeys [] *SKey = make([]*SKey, 1) - skeys[0] = & (SKey { Ts: &ts, Key: &akey, - SEMap: map[SEvent]bool { - SEventHSet: true, - SEventHDel: true, - SEventDel: true, - }}) - - s,e := SubscribeDB(Options { - DBNo : ConfigDB, - InitIndicator : "CONFIG_DB_INITIALIZED", - TableNameSeparator: "|", - KeySeparator : "|", - DisableCVLCheck : true, - }, skeys, func (s *DB, - skey *SKey, key *Key, - event SEvent) error { - switch event { - case SEventHSet: hSetCalled = true - case SEventHDel: hDelCalled = true - case SEventDel: delCalled = true - default: - } - return nil }) + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + 
t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() - if (s == nil) || (e != nil) { - t.Errorf("Subscribe() returns error e: %v", e) + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + e = d.StartTx(nil, nil) + v, e = d.GetEntry(&ts, akey) //DB get verify + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + _, e = d.GetEntry(&ts, akey) //verify from cache + if e == nil { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) return } - d.SetEntry(&ts, akey, avalue) - d.DeleteEntryFields(&ts, akey, avalue) + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} - time.Sleep(5 * time.Second) +//TestTransactionCacheWithDBContentDelFields +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetEntry +2. 
DeleteEntryFields and then GetEntry +*/ +func TestTransactionCacheWithDBContentDelFields(t *testing.T) { - if !hSetCalled || !hDelCalled || !delCalled { - t.Errorf("Subscribe() callbacks missed: %v %v %v", hSetCalled, - hDelCalled, delCalled) + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) return } - s.UnsubscribeDB() + e = d.StartTx(nil, nil) - time.Sleep(2 * time.Second) + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} - if e = d.DeleteDB() ; e != nil { + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "changed desc"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() + + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + e = d.StartTx(nil, nil) + v, e = d.GetEntry(&ts, akey) //DB get verify + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"policy_desc": "changed desc"}} + avalue3 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.DeleteEntryFields(&ts, akey, avalue2) + if e != nil { + t.Errorf("DeleteEntryFields() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) //verify from cache + if (e != nil) || (!reflect.DeepEqual(v, avalue3)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) //verify from cache + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + e = d.CommitTx() + if e != nil { + 
t.Errorf("CommitTx() fails e = %v", e) + return + } + + if e = d.DeleteDB(); e != nil { t.Errorf("DeleteDB() fails e = %v", e) } } +//TestTransactionCacheWithDBContentMod +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetEntry +2. ModEntry and then GetEntry +*/ +func TestTransactionCacheWithDBContentMod(t *testing.T) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + e = d.StartTx(nil, nil) + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() + + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + e = d.StartTx(nil, nil) + v, e = d.GetEntry(&ts, akey) //DB get verify + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "changed desc"}} + e = d.ModEntry(&ts, akey, avalue2) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) //verify from cache + if (e != nil) || (!reflect.DeepEqual(v, avalue2)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) //verify from cache + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + e = d.CommitTx() + if e != nil { + t.Errorf("CommitTx() 
fails e = %v", e) + return + } + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} + +//TestTransactionCacheWithDBContentSet +/* +Add a new entry for a table who has already has one entry pre-exisint in DB and performs below checks. +1. GetEntry +2. SetEntry and then GetEntry +*/ +func TestTransactionCacheWithDBContentSet(t *testing.T) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + e = d.StartTx(nil, nil) + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.CommitTx() + + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + e = d.StartTx(nil, nil) + v, e = d.GetEntry(&ts, akey) //DB get verify + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.SetEntry(&ts, akey, avalue) //SET tx cache + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) //verify from cache + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) //verify from cache + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + e = d.CommitTx() + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } 
+} + +func testTransactionCache(t *testing.T, transRun TransRun) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v, transRun = %v", e, transRun) + return + } + + e = d.StartTx(nil, nil) + + if e != nil { + t.Errorf("StartTx() fails e = %v", e) + return + } + + switch transRun { + case TransCacheRunGetAfterCreate: + //Performs GetEntry after Create + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.CreateEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("CreateEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheRunGetAfterSingleSet: + //Performs GetEntry after single SetEntry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheRunGetAfterMultiSet: + //Performs GetEntry after multiple SetEntry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue1 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "some desc"}} + e = d.SetEntry(&ts, akey, avalue1) + if e != 
nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue1)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + e = d.SetEntry(&ts, akey, avalue2) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue2)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheRunGetAfterMod: + //Performs GetEntry after ModEntry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue1 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "some desc"}} + e = d.SetEntry(&ts, akey, avalue1) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue1)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "changed desc"}} + e = d.ModEntry(&ts, akey, avalue2) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue2)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheRunGetWithInvalidKey: + //Performs GetEntry for invalid Entry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + _, e := d.GetEntry(&ts, akey) + if e == nil { + t.Errorf("GetEntry() should report error") + return + } + case TransCacheRunGetAfterDelEntry: + //Performs GetEntrys After DelEntry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := 
make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue1 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "some desc"}} + e = d.SetEntry(&ts, akey, avalue1) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue1)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + v, e = d.GetEntry(&ts, akey) + if (e == nil) || (reflect.DeepEqual(v, avalue1)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheRunGetAfterDelField: + //Performs GetEntrys After DelEntryFields + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue1 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "some desc"}} + e = d.SetEntry(&ts, akey, avalue1) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + v, e := d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue1)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + avalue2 := Value{map[string]string{"policy_desc": "some desc"}} + e = d.DeleteEntryFields(&ts, akey, avalue2) + if e != nil { + t.Errorf("DeleteEntryFields() fails e = %v", e) + return + } + avalue3 := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + v, e = d.GetEntry(&ts, akey) + if (e != nil) || (!reflect.DeepEqual(v, avalue3)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + case TransCacheGetKeysAfterSetAndDeleteKeys: + //Performs GetKeys After Set and Delete of Keys + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue1 := 
Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR", "policy_desc": "some desc"}} + e = d.SetEntry(&ts, akey, avalue1) + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 1) || (!keys[0].Equals(akey)) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + + e = d.DeleteKeys(&ts, akey) + + if e != nil { + t.Errorf("DeleteKeys() fails e = %v", e) + return + } + + keys, e = d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 0) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + case TransCacheGetKeysWithoutSet: + //Performs GetKeys on non-existing table spec + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + keys, e := d.GetKeys(&ts) + + if (e != nil) || (len(keys) != 0) { + t.Errorf("GetKeys() fails e = %v", e) + return + } + case TransCacheDelEntryEmpty: + //Performs DelEntry on non-existing entry + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + e = d.DeleteEntry(&ts, akey) + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + case TransCacheDelFieldsEmpty: + //performs deleteEntryFields on non-existing entry field + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"policy_desc": "some desc"}} + e = d.DeleteEntryFields(&ts, akey, avalue) + if e != nil { + t.Errorf("DeleteEntryFields() fails e = %v", e) + return + } + } + + e = d.AbortTx() + + if e != nil { + t.Errorf("AbortTx() fails e = %v", e) + return + } + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} + +func testTransaction(t *testing.T, transRun TransRun) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + 
KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v, transRun = %v", e, transRun) + return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + + var watchKeys []WatchKeys + var table []*TableSpec + + switch transRun { + case TransRunBasic, TransRunWatchKeysAndTable: + watchKeys = []WatchKeys{{Ts: &ts, Key: &akey}} + table = []*TableSpec{&ts} + case TransRunWatchKeys, TransRunFailWatchKeys: + watchKeys = []WatchKeys{{Ts: &ts, Key: &akey}} + table = []*TableSpec{} + case TransRunTable, TransRunFailTable: + watchKeys = []WatchKeys{} + table = []*TableSpec{&ts} + } + + e = d.StartTx(watchKeys, table) + + if e != nil { + t.Errorf("StartTx() fails e = %v", e) + return + } + + e = d.SetEntry(&ts, akey, avalue) + + if e != nil { + t.Errorf("SetEntry() fails e = %v", e) + return + } + + e = d.CommitTx() + + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + + v, e := d.GetEntry(&ts, akey) + + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Tx fails e = %v", e) + return + } + + e = d.StartTx(watchKeys, table) + + if e != nil { + t.Errorf("StartTx() fails e = %v", e) + return + } + + e = d.DeleteEntry(&ts, akey) + + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + + e = d.AbortTx() + + if e != nil { + t.Errorf("AbortTx() fails e = %v", e) + return + } + + v, e = d.GetEntry(&ts, akey) + + if (e != nil) || (!reflect.DeepEqual(v, avalue)) { + t.Errorf("GetEntry() after Abort Tx fails e = %v", e) + return + } + + e = d.StartTx(watchKeys, table) + + if e != nil { + t.Errorf("StartTx() fails e = %v", e) + return + } + + e = d.DeleteEntry(&ts, akey) + + if e != nil { + t.Errorf("DeleteEntry() fails e = %v", e) + return + } + + var lockFail bool + switch transRun { + case 
TransRunFailWatchKeys, TransRunFailTable: + d2, e2 := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if e2 != nil { + lockFail = true + break + } + + d2.StartTx(watchKeys, table) + d2.DeleteEntry(&ts, akey) + d2.CommitTx() + d2.DeleteDB() + default: + } + + e = d.CommitTx() + + switch transRun { + case TransRunFailWatchKeys, TransRunFailTable: + if !lockFail && e == nil { + t.Errorf("NT CommitTx() tr: %v fails e = %v", + transRun, e) + return + } + default: + if e != nil { + t.Errorf("CommitTx() fails e = %v", e) + return + } + } + + v, e = d.GetEntry(&ts, akey) + + if e == nil { + t.Errorf("GetEntry() after Tx DeleteEntry() fails e = %v", e) + return + } + + d.DeleteMapAll(&ts) + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} + +func TestMap(t *testing.T) { + + var pid int = os.Getpid() + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if d == nil { + t.Errorf("NewDB() fails e = %v", e) + return + } + + ts := TableSpec{Name: "TESTMAP_" + strconv.FormatInt(int64(pid), 10)} + + d.SetMap(&ts, "k1", "v1") + d.SetMap(&ts, "k2", "v2") + + if v, e := d.GetMap(&ts, "k1"); v != "v1" { + t.Errorf("GetMap() fails e = %v", e) + return + } + + if v, e := d.GetMapAll(&ts); (e != nil) || + (!reflect.DeepEqual(v, + Value{Field: map[string]string{ + "k1": "v1", "k2": "v2"}})) { + t.Errorf("GetMapAll() fails e = %v", e) + return + } + + d.DeleteMapAll(&ts) + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} + +func TestSubscribe(t *testing.T) { + + var pid int = os.Getpid() + + var hSetCalled, hDelCalled, delCalled bool + + d, e := NewDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }) + + if (d == nil) || (e != nil) { + t.Errorf("NewDB() fails e = %v", e) 
+ return + } + + ts := TableSpec{Name: "TEST_" + strconv.FormatInt(int64(pid), 10)} + + ca := make([]string, 1, 1) + ca[0] = "MyACL1_ACL_IPVNOTEXIST" + akey := Key{Comp: ca} + avalue := Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}} + + var skeys []*SKey = make([]*SKey, 1) + skeys[0] = &(SKey{Ts: &ts, Key: &akey, + SEMap: map[SEvent]bool{ + SEventHSet: true, + SEventHDel: true, + SEventDel: true, + }}) + + s, e := SubscribeDB(Options{ + DBNo: ConfigDB, + InitIndicator: "", + TableNameSeparator: "|", + KeySeparator: "|", + DisableCVLCheck: true, + }, skeys, func(s *DB, + skey *SKey, key *Key, + event SEvent) error { + switch event { + case SEventHSet: + hSetCalled = true + case SEventHDel: + hDelCalled = true + case SEventDel: + delCalled = true + default: + } + return nil + }) + + if (s == nil) || (e != nil) { + t.Errorf("Subscribe() returns error e: %v", e) + return + } + + d.SetEntry(&ts, akey, avalue) + d.DeleteEntryFields(&ts, akey, avalue) + + time.Sleep(5 * time.Second) + + if !hSetCalled || !hDelCalled || !delCalled { + t.Errorf("Subscribe() callbacks missed: %v %v %v", hSetCalled, + hDelCalled, delCalled) + return + } + + s.UnsubscribeDB() + + time.Sleep(2 * time.Second) + + if e = d.DeleteDB(); e != nil { + t.Errorf("DeleteDB() fails e = %v", e) + } +} diff --git a/translib/db/db_value.go b/translib/db/db_value.go new file mode 100644 index 000000000000..a439a1249695 --- /dev/null +++ b/translib/db/db_value.go @@ -0,0 +1,215 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2023 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. 
// +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +//////////////////////////////////////////////////////////////////////////////// +// Exported Types // +//////////////////////////////////////////////////////////////////////////////// + +// Value gives the fields as a map. +// (Eg: { Field: map[string]string { "type" : "l3v6", "ports" : "eth0" } } ). +type Value struct { + Field map[string]string +} + +//////////////////////////////////////////////////////////////////////////////// +// Exported Functions // +//////////////////////////////////////////////////////////////////////////////// + +func (v Value) String() string { + var str string + for k, v1 := range v.Field { + str = str + fmt.Sprintf("\"%s\": \"%s\"\n", k, v1) + } + + return str +} + +func (v Value) Copy() (rV Value) { + rV = Value{Field: make(map[string]string, len(v.Field))} + for k, v1 := range v.Field { + rV.Field[k] = v1 + } + return +} + +// Compare4TxOps gives the Redis Ops/Notifs that are to be generated whilst +// going from a v Value to dst Value. 
Returns if HSet, and/or HDel needs +// to be performed +func (v Value) Compare4TxOps(dst Value) (isHSet, isHDel bool) { + for _, fs := range v.Field { + if fd, fdOk := dst.Field[fs]; !fdOk { + isHDel = true + } else if fd != fs { + isHSet = true + } + if isHDel && isHSet { + return + } + } + + for _, fd := range dst.Field { + if _, fsOk := v.Field[fd]; !fsOk { + isHSet = true + } + if isHSet { + return + } + } + return +} + +//===== Functions for db.Value ===== + +func (v *Value) IsPopulated() bool { + return len(v.Field) > 0 +} + +// Has function checks if a field exists. +func (v *Value) Has(name string) bool { + _, flag := v.Field[name] + return flag +} + +// Get returns the value of a field. Returns empty string if the field +// does not exists. Use Has() function to check existance of field. +func (v *Value) Get(name string) string { + return v.Field[name] +} + +// Set function sets a string value for a field. +func (v *Value) Set(name, value string) { + v.Field[name] = value +} + +// GetInt returns value of a field as int. Returns 0 if the field does +// not exists. Returns an error if the field value is not a number. +func (v *Value) GetInt(name string) (int, error) { + data, ok := v.Field[name] + if ok { + return strconv.Atoi(data) + } + return 0, nil +} + +// SetInt sets an integer value for a field. +func (v *Value) SetInt(name string, value int) { + v.Set(name, strconv.Itoa(value)) +} + +// GetList returns the value of a an array field. A "@" suffix is +// automatically appended to the field name if not present (as per +// swsssdk convention). Field value is split by comma and resulting +// slice is returned. Empty slice is returned if field not exists. +func (v *Value) GetList(name string) []string { + var data string + if strings.HasSuffix(name, "@") { + data = v.Get(name) + } else { + data = v.Get(name + "@") + } + + if len(data) == 0 { + return []string{} + } + + return strings.Split(data, ",") +} + +// SetList function sets an list value to a field. 
Field name and +// value are formatted as per swsssdk conventions: +// - A "@" suffix is appended to key name +// - Field value is the comma separated string of list items +func (v *Value) SetList(name string, items []string) { + if !strings.HasSuffix(name, "@") { + name += "@" + } + + if len(items) != 0 { + data := strings.Join(items, ",") + v.Set(name, data) + } else { + v.Remove(name) + } +} + +// Remove function removes a field from this Value. +func (v *Value) Remove(name string) { + delete(v.Field, name) +} + +// ContainsAll returns true if this value is a superset of the other value. +func (v *Value) ContainsAll(other *Value) bool { + if len(v.Field) < len(other.Field) { + return false + } + for oName, oVal := range other.Field { + switch fVal, ok := v.Field[oName]; { + case !ok: // field not present + return false + case fVal == oVal: // field values match + continue + case oName[len(oName)-1] != '@': // non leaf-list value mismatch + return false + case !leaflistEquals(fVal, oVal): // leaf-list value mismatch, ignoring order + return false + } + } + return true +} + +// Equals returns true if this value contains same set of attributes as the other value +func (v *Value) Equals(other *Value) bool { + return len(v.Field) == len(other.Field) && v.ContainsAll(other) +} + +//////////////////////////////////////////////////////////////////////////////// +// Internal Functions // +//////////////////////////////////////////////////////////////////////////////// + +// leaflistEquals compares two leaf-list values (comma separated instances) +// are same. 
Ignores the order of instances +func leaflistEquals(a, b string) bool { + if len(a) != len(b) { + return false + } + + listA := strings.Split(a, ",") + listB := strings.Split(b, ",") + if len(listA) != len(listB) { + return false + } + + sort.Strings(listA) + for _, s := range listB { + if k := sort.SearchStrings(listA, s); k == len(listA) || listA[k] != s { + return false + } + } + + return true +} diff --git a/translib/db/db_value_test.go b/translib/db/db_value_test.go new file mode 100644 index 000000000000..b7e1d827e570 --- /dev/null +++ b/translib/db/db_value_test.go @@ -0,0 +1,195 @@ +//////////////////////////////////////////////////////////////////////////////// +// // +// Copyright 2023 Broadcom. The term Broadcom refers to Broadcom Inc. and/or // +// its subsidiaries. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"); // +// you may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +package db + +import ( + "testing" +) + +func verifyValueContainsAll(t *testing.T, v1, v2 Value, exp bool) { + if v1.ContainsAll(&v2) != exp { + t.Errorf("v1.ContainsAll(v2) != %v\nv1 = %v\nv2 = %v", exp, v1.Field, v2.Field) + } +} + +func verifyValueEquals(t *testing.T, v1, v2 Value, exp bool) { + if v1.Equals(&v2) != exp { + t.Errorf("v1.Equals(v2) != %v\nv1 = %v\nv2 = %v", exp, v1.Field, v2.Field) + } +} + +func TestValueContainsAll(t *testing.T) { + testContainsAll := func(v1, v2 Value, exp bool) func(*testing.T) { + return func(tt *testing.T) { verifyValueContainsAll(tt, v1, v2, exp) } + } + + /* both equal cases */ + t.Run("nil-nil", testContainsAll(Value{}, Value{}, true)) + t.Run("empty-empty", testContainsAll( + Value{Field: map[string]string{}}, + Value{Field: map[string]string{}}, + true, + )) + t.Run("xyz-xyz", testContainsAll( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "1", "two": "2"}}, + true, + )) + + /* contains more */ + t.Run("xyz-empty", testContainsAll( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{}}, + true, + )) + t.Run("xyz-xy", testContainsAll( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "1"}}, + true, + )) + + /* not contains cases */ + t.Run("empty-xyz", testContainsAll( + Value{Field: map[string]string{}}, + Value{Field: map[string]string{"one": "1", "two": "2"}}, + false, + )) + t.Run("xyz-XYZ", testContainsAll( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "1", "two": "002"}}, + false, + )) + t.Run("xyz-abc", testContainsAll( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "1", "hello": "world"}}, + false, + )) + t.Run("xyz-xyzL", testContainsAll( + Value{Field: 
map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "1", "two": "2", "L@": "foo"}}, + false, + )) + + /* leaf-list cases */ + t.Run("list,empty", testContainsAll( + Value{Field: map[string]string{"L@": "", "one": "1", "two": "002"}}, + Value{Field: map[string]string{"L@": ""}}, + true, + )) + t.Run("list,1inst", testContainsAll( + Value{Field: map[string]string{"L@": "foo", "one": "1"}}, + Value{Field: map[string]string{"L@": "foo", "one": "1"}}, + true, + )) + t.Run("list,equal", testContainsAll( + Value{Field: map[string]string{"L@": "foo,bar", "one": "1"}}, + Value{Field: map[string]string{"L@": "foo,bar"}}, + true, + )) + t.Run("list,out_of_order", testContainsAll( + Value{Field: map[string]string{"L@": "foo,bar,01,002,0003,00004"}}, + Value{Field: map[string]string{"L@": "0003,bar,01,00004,foo,002"}}, + true, + )) + + /* leaf-list mismatch cases */ + t.Run("list,more_inst", testContainsAll( + Value{Field: map[string]string{"L@": "foo,bar"}}, + Value{Field: map[string]string{"L@": "foo+bar"}}, + false, + )) + t.Run("list,less_inst", testContainsAll( + Value{Field: map[string]string{"L@": "foo+bar"}}, + Value{Field: map[string]string{"L@": "foo,bar"}}, + false, + )) + t.Run("list,diff", testContainsAll( + Value{Field: map[string]string{"L@": "foo,bar,001"}}, + Value{Field: map[string]string{"L@": "foo,bar,002"}}, + false, + )) + t.Run("list,diff_len", testContainsAll( + Value{Field: map[string]string{"L@": "foo,bar"}}, + Value{Field: map[string]string{"L@": "hello,world"}}, + false, + )) + +} + +func TestValueEquals(t *testing.T) { + testEquals := func(v1, v2 Value, exp bool) func(*testing.T) { + return func(tt *testing.T) { verifyValueEquals(tt, v1, v2, exp) } + } + + t.Run("nil-nil", testEquals(Value{}, Value{}, true)) + t.Run("empty-empty", testEquals( + Value{Field: map[string]string{}}, + Value{Field: map[string]string{}}, + true, + )) + t.Run("xyz-xyz", testEquals( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + 
Value{Field: map[string]string{"one": "1", "two": "2"}}, + true, + )) + + t.Run("list,empty", testEquals( + Value{Field: map[string]string{"L@": ""}}, + Value{Field: map[string]string{"L@": ""}}, + true, + )) + t.Run("list,1inst", testEquals( + Value{Field: map[string]string{"L@": "foo", "one": "1"}}, + Value{Field: map[string]string{"L@": "foo", "one": "1"}}, + true, + )) + t.Run("list,equal", testEquals( + Value{Field: map[string]string{"L@": "foo,bar"}}, + Value{Field: map[string]string{"L@": "foo,bar"}}, + true, + )) + t.Run("list,out_of_order", testEquals( + Value{Field: map[string]string{"L@": "foo,bar,01,002,0003,00004"}}, + Value{Field: map[string]string{"L@": "0003,bar,01,00004,foo,002"}}, + true, + )) + + t.Run("empty-xyz", testEquals( + Value{Field: map[string]string{}}, + Value{Field: map[string]string{"one": "1", "two": "2"}}, + false, + )) + t.Run("xyz-empty", testEquals( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{}}, + false, + )) + t.Run("xyz-XYZ", testEquals( + Value{Field: map[string]string{"one": "1", "two": "2"}}, + Value{Field: map[string]string{"one": "01", "two": "02"}}, + false, + )) + t.Run("list,diff", testEquals( + Value{Field: map[string]string{"L@": "foo,bar,001"}}, + Value{Field: map[string]string{"L@": "foo,bar,002"}}, + false, + )) + +} diff --git a/translib/db/map.go b/translib/db/map.go index 021897c14963..e7fa15871006 100644 --- a/translib/db/map.go +++ b/translib/db/map.go @@ -23,14 +23,8 @@ Package db implements a wrapper over the go-redis/redis. 
package db import ( - // "fmt" - // "strconv" + "time" - // "reflect" - // "errors" - // "strings" - - // "github.com/go-redis/redis/v7" "github.com/golang/glog" // "github.com/Azure/sonic-mgmt-common/cvl" "github.com/Azure/sonic-mgmt-common/translib/tlerr" @@ -39,16 +33,95 @@ import ( func init() { } - - - func (d *DB) GetMap(ts *TableSpec, mapKey string) (string, error) { if glog.V(3) { glog.Info("GetMap: Begin: ", "ts: ", ts, " mapKey: ", mapKey) } - v, e := d.client.HGet(ts.Name, mapKey).Result() + if (d == nil) || (d.client == nil) { + return "", tlerr.TranslibDBConnectionReset{} + } + + // GetMapHits + // Time Start + var cacheHit bool + var now time.Time + var dur time.Duration + var stats Stats + if d.dbStatsConfig.TimeStats { + now = time.Now() + } + + var mAP MAP + var e error + var v string + + // If pseudoDB then do custom. TBD. + + // If cache GetFromCache (CacheHit?) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheMap(ts.Name) { + var ok bool + if mAP, ok = d.cache.Maps[ts.Name]; ok { + if v, ok = mAP.mapMap[mapKey]; ok { + cacheHit = true + } + } + } + + if !cacheHit { + + glog.Info("GetMap: RedisCmd: ", d.Name(), ": ", "HGET ", ts.Name, + mapKey) + v, e = d.client.HGet(ts.Name, mapKey).Result() + + // If cache SetCache (i.e. 
a cache miss) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheMap(ts.Name) { + if _, ok := d.cache.Maps[ts.Name]; !ok { + d.cache.Maps[ts.Name] = MAP{ + ts: ts, + complete: false, + mapMap: make(map[string]string, InitialMapKeyCount), + db: d, + } + } + d.cache.Maps[ts.Name].mapMap[mapKey] = v + } + + } + + // Time End, Time, Peak + if d.dbStatsConfig.MapStats { + stats = d.stats.Maps[ts.Name] + } else { + stats = d.stats.AllMaps + } + + stats.Hits++ + stats.GetMapHits++ + if cacheHit { + stats.GetMapCacheHits++ + } + + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + + if dur > stats.Peak { + stats.Peak = dur + } + stats.Time += dur + + if dur > stats.GetMapPeak { + stats.GetMapPeak = dur + } + stats.GetMapTime += dur + } + + if d.dbStatsConfig.MapStats { + d.stats.Maps[ts.Name] = stats + } else { + d.stats.AllMaps = stats + } if glog.V(3) { glog.Info("GetMap: End: ", "v: ", v, " e: ", e) @@ -63,17 +136,103 @@ func (d *DB) GetMapAll(ts *TableSpec) (Value, error) { glog.Info("GetMapAll: Begin: ", "ts: ", ts) } + if (d == nil) || (d.client == nil) { + return Value{}, tlerr.TranslibDBConnectionReset{} + } + + // GetMapAllHits + // Time Start + var cacheHit bool + var now time.Time + var dur time.Duration + var stats Stats + if d.dbStatsConfig.TimeStats { + now = time.Now() + } + + var mAP MAP + var e error var value Value + var v map[string]string + + // If pseudoDB then do custom. TBD. + + // If cache GetFromCache (CacheHit?) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheMap(ts.Name) { + var ok bool + if mAP, ok = d.cache.Maps[ts.Name]; ok { + if mAP.complete { + cacheHit = true + value = Value{Field: mAP.mapMap} + } + } + } + + if !cacheHit { + + glog.Info("GetMapAll: RedisCmd: ", d.Name(), ": ", "HGETALL ", ts.Name) + v, e = d.client.HGetAll(ts.Name).Result() - v, e := d.client.HGetAll(ts.Name).Result() + if len(v) != 0 { + + value = Value{Field: v} + + // If cache SetCache (i.e. 
a cache miss) + if d.dbCacheConfig.PerConnection && d.dbCacheConfig.isCacheMap(ts.Name) { + d.cache.Maps[ts.Name] = MAP{ + ts: ts, + complete: true, + mapMap: v, + db: d, + } + } + + } else { + if glog.V(1) { + glog.Info("GetMapAll: HGetAll(): empty map") + } + + if e != nil { + glog.Error("GetMapAll: ", d.Name(), + ": HGetAll(", ts.Name, "): error: ", e.Error()) + } else { + e = tlerr.TranslibRedisClientEntryNotExist{Entry: ts.Name} + } + } + + } - if len(v) != 0 { - value = Value{Field: v} + // Time End, Time, Peak + if d.dbStatsConfig.MapStats { + stats = d.stats.Maps[ts.Name] } else { - if glog.V(1) { - glog.Info("GetMapAll: HGetAll(): empty map") + stats = d.stats.AllMaps + } + + stats.Hits++ + stats.GetMapAllHits++ + if cacheHit { + stats.GetMapAllCacheHits++ + } + + if d.dbStatsConfig.TimeStats { + dur = time.Since(now) + + if dur > stats.Peak { + stats.Peak = dur + } + stats.Time += dur + + if dur > stats.GetMapAllPeak { + stats.GetMapAllPeak = dur } - e = tlerr.TranslibRedisClientEntryNotExist { Entry: ts.Name } + stats.GetMapAllTime += dur + } + + if d.dbStatsConfig.MapStats { + d.stats.Maps[ts.Name] = stats + } else { + d.stats.AllMaps = stats } if glog.V(3) { @@ -101,9 +260,11 @@ func (d *DB) SetMap(ts *TableSpec, mapKey string, mapValue string) error { return e } + // For Testing only. Do Not Use!!! ============================== // DeleteMapAll - There is no transaction support on these. +// TBD: Unexport this. : Lower case it, and reference too func (d *DB) DeleteMapAll(ts *TableSpec) error { if glog.V(3) { @@ -118,6 +279,5 @@ func (d *DB) DeleteMapAll(ts *TableSpec) error { return e } -// For Testing only. Do Not Use!!! ============================== - +// For Testing only. Do Not Use!!! 
============================== diff --git a/translib/transformer/utils_test.go b/translib/transformer/utils_test.go index 8718398f600b..4dd4c20fc475 100644 --- a/translib/transformer/utils_test.go +++ b/translib/transformer/utils_test.go @@ -191,7 +191,6 @@ func processActionRequest(url string, jsonPayload string, oper string, user stri func getConfigDb() *db.DB { configDb, _ := db.NewDB(db.Options{ DBNo: db.ConfigDB, - InitIndicator: "CONFIG_DB_INITIALIZED", TableNameSeparator: "|", KeySeparator: "|", })