REPL (Read-Eval-Print Loop) environments provide interactive shells for
exploratory coding, debugging, and operational tasks. This guide covers best
practices for using REPLs effectively across multiple languages and tools.
Use python3 -i to drop into an interactive session after running a script.
# Launch interactive mode after script execution# Useful for inspecting state after a script runspython3-imy_script.py# Launch with specific startup commandspython3-c"import os; print(os.getcwd())"-i# Use PYTHONSTARTUP for automatic imports# ~/.pythonstartup.pyimportosimportsysimportjsonfrompathlibimportPathfromdatetimeimportdatetime,timedeltafrompprintimportpprint# Enable tab completion in standard REPLimportreadlineimportrlcompleterreadline.parse_and_bind("tab: complete")# Enable persistent historyimportatexitHISTFILE=os.path.expanduser("~/.python_history")try:readline.read_history_file(HISTFILE)exceptFileNotFoundError:passatexit.register(readline.write_history_file,HISTFILE)
# Set PYTHONSTARTUP in shell profile
export PYTHONSTARTUP="$HOME/.pythonstartup.py"
IPython is the recommended Python REPL for development workflows.
# Install IPython with extras
# pip install ipython[all]

# ~/.ipython/profile_default/ipython_config.py
c = get_config()

# Auto-reload modules when they change on disk
c.InteractiveShellApp.extensions = ["autoreload"]
c.InteractiveShellApp.exec_lines = ["%autoreload 2"]

# Display settings
c.InteractiveShell.colors = "Linux"
c.InteractiveShell.confirm_exit = False
c.InteractiveShell.editor = "vim"

# History settings
c.HistoryManager.hist_file = "~/.ipython/profile_default/history.sqlite"
c.HistoryManager.db_log_output = True

# Logging - automatically log all input/output
c.InteractiveShell.logstart = False
c.InteractiveShell.logfile = ""

# Autocall - call functions without parentheses (0=off, 1=smart, 2=full)
c.InteractiveShell.autocall = 0

# Enable async/await at the top level
c.InteractiveShell.loop_runner = "asyncio"
c.InteractiveShell.autoawait = True
# ~/.ipython/profile_default/startup/00-imports.py
# Automatically loaded on IPython startup
import os
import sys
import json
import logging
from pathlib import Path
from datetime import datetime, timedelta, timezone
from collections import defaultdict, Counter
from pprint import pprint
from typing import Any, Dict, List, Optional

# DevOps-specific imports (load if available)
try:
    import boto3
    import botocore
except ImportError:
    pass

try:
    import requests
except ImportError:
    pass

try:
    import yaml
except ImportError:
    pass


# Utility functions available in every session
def pp(obj: Any) -> None:
    """Pretty-print with type information."""
    print(f"Type: {type(obj).__name__}")
    pprint(obj)


def load_json(path: str) -> dict:
    """Quick JSON file loader."""
    return json.loads(Path(path).read_text())


def load_yaml(path: str) -> dict:
    """Quick YAML file loader (requires PyYAML to be installed)."""
    return yaml.safe_load(Path(path).read_text())


def env(name: str, default: str = "") -> str:
    """Quick environment variable lookup with optional default."""
    return os.environ.get(name, default)
# Time execution of a single statement%timeitsorted(range(1000))# Output: 28.4 us +/- 1.23 us per loop# Time execution of a cell (multiline)%%timeitdata=list(range(10000))sorted(data)# Output: 342 us +/- 12.5 us per loop# Run a script and keep variables in namespace%runmy_analysis.py# Run a script in debugger mode%run-dmy_analysis.py# Profile code execution%prunexpensive_function()# Line-by-line profiling (requires line_profiler)%lprun-fmy_functionmy_function(args)# Show source code of a function%psourcemy_function# Show documentation%pdocmy_function# Quick reference for an objectmy_object?# Detailed reference with source codemy_object??# List all variables in namespace%who%whos# Reset namespace (clear all variables)%reset-f# Save session commands to file%savemy_session.py1-50# Load and execute a file%loadmy_script.py# Edit and execute in external editor%editmy_function# View command history%history%history-n1-20# Search history%history-g"import*boto"# System shell commands!ls-la!gitstatusfiles=!ls*.pyprint(files)# Change directory%cd/path/to/project# Bookmark directories%bookmarkproject/path/to/project%cd-bproject# Store variables between sessions%storemy_variable%store-r# Restore stored variables# Auto-reload modules (with extension loaded)%autoreload2
# Drop into debugger on exception%pdbon# Set breakpoint in codedefprocess_data(items):foriteminitems:ifitem.get("status")=="error":breakpoint()# Drops into pdb/ipdbprocess_item(item)# Post-mortem debugging after exception%debug# IPython debugger commands in pdb/ipdb# n - next line# s - step into function# c - continue execution# p variable - print variable# pp variable - pretty-print variable# l - list source code# w - show call stack# u - move up in call stack# d - move down in call stack# b 42 - set breakpoint at line 42# b module:42 - set breakpoint in module at line 42# cl - clear breakpoints# q - quit debugger# Embed IPython in any script for interactive debuggingfromIPythonimportembeddefcomplex_calculation(data):intermediate=transform(data)embed()# Opens IPython shell here with local variablesreturnfinalize(intermediate)# Use ipdb for enhanced debugging# pip install ipdbimportipdbdefprocess_pipeline(config):pipeline=build_pipeline(config)ipdb.set_trace()# Enhanced pdb with IPython featuresresult=pipeline.run()returnresult
# ~/.config/ptpython/config.py
from ptpython.layout import CompletionVisualisation


def configure(repl):
    """Configure the ptpython REPL instance at startup."""
    # Use vi or emacs keybindings
    repl.vi_mode = False

    # Enable mouse support
    repl.enable_mouse_support = True

    # Show function signature popup
    repl.show_signature = True

    # Show docstring popup
    repl.show_docstring = True

    # Highlight matching parentheses
    repl.highlight_matching_parenthesis = True

    # Completion style
    repl.completion_visualisation = CompletionVisualisation.POP_UP

    # Show line numbers
    repl.show_line_numbers = True

    # Enable auto-suggestions from history
    repl.enable_auto_suggest = True

    # Confirm on exit
    repl.confirm_exit = False

    # Enable input validation (syntax checking)
    repl.enable_input_validation = True

    # Color scheme
    repl.color_depth = "DEPTH_24_BIT"
    repl.use_code_colorscheme("monokai")

    # Enable title in terminal
    repl.enable_title = True

    # Paste mode - handle multiline paste correctly
    repl.paste_mode = False
    repl.enable_open_in_editor = True
// repl-server.js - Create a custom REPL with preloaded context
const repl = require("repl");
const fs = require("fs");
const path = require("path");

// Start custom REPL with project context
const replServer = repl.start({
  prompt: "app> ",
  useColors: true,
  preview: true,
  terminal: true,
});

// Preload commonly used modules
replServer.context.fs = fs;
replServer.context.path = path;
replServer.context._ = require("lodash");

// Add database connection
const { Pool } = require("pg");
replServer.context.db = new Pool({
  connectionString: process.env.DATABASE_URL,
});

// Add helper functions
replServer.context.query = async (sql, params) => {
  const result = await replServer.context.db.query(sql, params);
  console.table(result.rows);
  return result.rows;
};

replServer.context.findUser = async (email) => {
  return replServer.context.query("SELECT * FROM users WHERE email = $1", [email]);
};

// Enable command history persistence
const historyFile = path.join(process.env.HOME, ".node_repl_history");
try {
  const history = fs.readFileSync(historyFile, "utf8");
  history
    .split("\n")
    .reverse()
    .filter(Boolean)
    .forEach((line) => {
      replServer.history.push(line);
    });
} catch {
  // No history file yet
}

replServer.on("exit", () => {
  fs.writeFileSync(historyFile, replServer.history.join("\n"));
  process.exit();
});

// Custom commands
replServer.defineCommand("models", {
  help: "List all database models",
  action() {
    const models = fs.readdirSync("./models").filter((f) => f.endsWith(".js"));
    console.log("Available models:", models);
    this.displayPrompt();
  },
});

replServer.defineCommand("routes", {
  help: "List all API routes",
  action() {
    const routes = require("./routes");
    console.table(
      routes.stack.map((r) => ({
        method: r.route?.method || "USE",
        path: r.route?.path || r.regexp,
      }))
    );
    this.displayPrompt();
  },
});
# Launch terraform console with variable file
# terraform console -var-file=environments/dev.tfvars

# Inspect resource attributes
> aws_vpc.main.id
"vpc-0abc123def456"
> aws_vpc.main.cidr_block
"10.0.0.0/16"

# Inspect lists and maps
> aws_subnet.private[*].id
[
  "subnet-0abc123",
  "subnet-0def456",
  "subnet-0ghi789",
]
> aws_subnet.private[*].availability_zone
[
  "us-east-1a",
  "us-east-1b",
  "us-east-1c",
]

# Test string interpolation
> "arn:aws:s3:::${var.bucket_name}/*"
"arn:aws:s3:::my-app-bucket/*"

# Test conditional expressions
> var.environment == "prod" ? 3 : 1
1

# Test for_each and for expressions
> { for s in aws_subnet.private : s.availability_zone => s.id }
{
  "us-east-1a" = "subnet-0abc123"
  "us-east-1b" = "subnet-0def456"
  "us-east-1c" = "subnet-0ghi789"
}

# Test list comprehensions
> [for s in var.subnets : s.cidr if s.public]
[
  "10.0.1.0/24",
  "10.0.2.0/24",
]

# Test functions
> cidrsubnet("10.0.0.0/16", 8, 1)
"10.0.1.0/24"
> cidrsubnet("10.0.0.0/16", 8, 2)
"10.0.2.0/24"
> formatdate("YYYY-MM-DD", timestamp())
"2025-01-15"
> length(var.availability_zones)
3
> lookup(var.instance_types, var.environment, "t3.micro")
"t3.large"

# Test regex
> regex("^([a-z]+)-([0-9]+)$", "app-123")
[
  "app",
  "123",
]
> can(regex("^[a-z]+$", var.name))
true

# Test type conversion
> tolist(toset(["a", "b", "a", "c"]))
[
  "a",
  "b",
  "c",
]
> tonumber("42")
42
> tobool("true")
true

# Inspect module outputs
> module.vpc.vpc_id
"vpc-0abc123def456"
> module.vpc.private_subnet_ids
[
  "subnet-0abc123",
  "subnet-0def456",
]

# Test jsonencode/jsondecode
> jsonencode({
    name = var.app_name
    env  = var.environment
    tags = var.common_tags
  })
"{\"env\":\"dev\",\"name\":\"myapp\",\"tags\":{\"team\":\"platform\"}}"
> jsondecode(file("config.json"))
{
  "database" = {
    "host" = "localhost"
    "port" = 5432
  }
}
# Test complex merge operations before applying
> merge(var.common_tags, {
    Name        = "${var.project}-${var.environment}"
    ManagedBy   = "terraform"
    Environment = var.environment
  })
{
  "Environment" = "dev"
  "ManagedBy" = "terraform"
  "Name" = "myapp-dev"
  "Owner" = "platform-team"
  "Project" = "myapp"
}

# Validate CIDR calculations
> [for i in range(4) : cidrsubnet("10.0.0.0/16", 8, i)]
[
  "10.0.0.0/24",
  "10.0.1.0/24",
  "10.0.2.0/24",
  "10.0.3.0/24",
]

# Test templatefile rendering
> templatefile("templates/user_data.sh.tpl", {
    cluster_name = var.cluster_name
    region       = var.region
    node_labels  = join(",", var.node_labels)
  })
"#!/bin/bash\nset -euo pipefail\n..."

# Inspect data source results
> data.aws_ami.ubuntu.id
"ami-0abc123def456"
> data.aws_ami.ubuntu.name
"ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20250101"

# Test try() for error handling
> try(var.optional_config.database.host, "localhost")
"localhost"
> try(tonumber("not-a-number"), 0)
0

# Test coalesce for default values
> coalesce(var.custom_domain, "${var.app_name}.example.com")
"myapp.example.com"
> coalescelist(var.custom_cidrs, ["10.0.0.0/16"])
[
  "10.0.0.0/16",
]
# Execute a command in a running container
kubectl exec -it deployment/myapp -- /bin/bash

# Execute in a specific container within a multi-container pod
kubectl exec -it pod/myapp-5d4f6b7c8-abc12 \
  -c sidecar-container -- /bin/sh

# Run a single command without interactive shell
kubectl exec pod/myapp-5d4f6b7c8-abc12 -- \
  cat /app/config/settings.yaml

# Execute with namespace
kubectl exec -it deployment/myapp \
  -n production -- /bin/bash

# Check environment variables in a container
kubectl exec pod/myapp-5d4f6b7c8-abc12 -- env | sort

# Inspect mounted secrets
kubectl exec pod/myapp-5d4f6b7c8-abc12 -- \
  ls -la /var/run/secrets/kubernetes.io/serviceaccount/

# Test network connectivity from within a pod
kubectl exec -it pod/myapp-5d4f6b7c8-abc12 -- \
  curl -s http://other-service:8080/health

# Check DNS resolution
kubectl exec -it pod/myapp-5d4f6b7c8-abc12 -- \
  nslookup other-service.default.svc.cluster.local
# Debug a running pod with an ephemeral debug container
kubectl debug -it pod/myapp-5d4f6b7c8-abc12 \
  --image=busybox:latest \
  --target=myapp

# Debug with a full toolkit image
kubectl debug -it pod/myapp-5d4f6b7c8-abc12 \
  --image=nicolaka/netshoot \
  --target=myapp

# Create a debug copy of a pod with a different image
kubectl debug pod/myapp-5d4f6b7c8-abc12 -it \
  --copy-to=myapp-debug \
  --container=myapp \
  --image=myapp:debug

# Debug a node
kubectl debug node/worker-node-1 -it \
  --image=busybox:latest
# Common debugging commands inside debug containers

# Check network connectivity
ping other-service
nslookup kubernetes.default.svc.cluster.local
curl -v http://service-name:8080/health
wget -qO- http://service-name:8080/metrics

# Check filesystem and processes
ls -la /proc/1/root/app/
cat /proc/1/environ | tr '\0' '\n'
ps aux

# Network diagnostics
ss -tlnp
netstat -an
tcpdump -i eth0 -n port 8080
iptables -L -n
# Run an interactive pod for troubleshooting
kubectl run debug-shell --rm -it \
  --image=busybox:latest \
  --restart=Never -- /bin/sh

# Run with a specific service account
kubectl run debug-shell --rm -it \
  --image=busybox:latest \
  --restart=Never \
  --serviceaccount=myapp-sa -- /bin/sh

# Run a curl pod for API testing
kubectl run curl-test --rm -it \
  --image=curlimages/curl:latest \
  --restart=Never -- \
  curl -s http://myapp-service:8080/api/health

# Run a DNS debugging pod
kubectl run dns-debug --rm -it \
  --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 \
  --restart=Never -- /bin/bash

# Inside DNS debug pod
nslookup kubernetes.default
dig myapp-service.default.svc.cluster.local
host myapp-service

# Run a database client pod
kubectl run psql-client --rm -it \
  --image=postgres:16 \
  --restart=Never \
  --env="PGPASSWORD=$DB_PASSWORD" -- \
  psql -h db-service -U myuser -d mydb

# Run a Redis client pod
kubectl run redis-client --rm -it \
  --image=redis:7 \
  --restart=Never -- \
  redis-cli -h redis-service -p 6379

# Run with resource limits
kubectl run debug-shell --rm -it \
  --image=busybox:latest \
  --restart=Never \
  --limits="cpu=200m,memory=256Mi" -- /bin/sh

# Run in a specific namespace with labels
kubectl run debug-shell --rm -it \
  --namespace=production \
  --image=busybox:latest \
  --restart=Never \
  --labels="app=debug,team=platform" -- /bin/sh
-- Connect to PostgreSQL
-- psql -h localhost -U myuser -d mydb -p 5432

-- Connection with SSL
-- psql "host=db.example.com dbname=mydb user=myuser sslmode=require"

-- psql meta-commands
\l                  -- List all databases
\c mydb             -- Connect to database
\dt                 -- List tables in current schema
\dt+                -- List tables with size info
\dt public.*        -- List tables in public schema
\d users            -- Describe table structure
\d+ users           -- Detailed table description
\di                 -- List indexes
\di+ users          -- List indexes for specific table
\dv                 -- List views
\df                 -- List functions
\dn                 -- List schemas
\du                 -- List roles/users
\dp                 -- List access privileges
\dx                 -- List installed extensions

-- Query formatting
\x                  -- Toggle expanded display (vertical)
\x auto             -- Auto-switch based on width
\pset format csv    -- Output as CSV
\pset format html   -- Output as HTML
\pset border 2      -- Full table borders
\timing             -- Show query execution time

-- Save output to file
\o output.csv
SELECT * FROM users WHERE status = 'active';
\o                  -- Stop writing to file

-- Execute SQL from file
\i /path/to/migration.sql

-- Edit query in external editor
\e

-- Show last error in detail
\errverbose

-- Variable substitution
\set active_status 'active'
SELECT * FROM users WHERE status = :'active_status';

-- Transaction control
BEGIN;
UPDATE users SET status = 'inactive'
WHERE last_login < NOW() - INTERVAL '1 year';
-- Inspect before committing
SELECT COUNT(*) FROM users WHERE status = 'inactive';
ROLLBACK;  -- or COMMIT;

-- Useful psql configuration (~/.psqlrc)
-- \set QUIET 1
-- \pset null '[NULL]'
-- \set HISTFILE ~/.psql_history-:DBNAME
-- \set HISTCONTROL ignoredups
-- \set COMP_KEYWORD_CASE upper
-- \timing
-- \x auto
-- \set PROMPT1 '%[%033[1;32m%]%n@%/%[%033[0m%] %# '
-- \set QUIET 0
-- Connect to MySQL
-- mysql -h localhost -u myuser -p mydb

-- MySQL meta-commands
SHOW DATABASES;
USE mydb;
SHOW TABLES;
DESCRIBE users;
SHOW CREATE TABLE users;
SHOW INDEX FROM users;
SHOW PROCESSLIST;
SHOW VARIABLES LIKE 'max_connections';
SHOW STATUS LIKE 'Threads%';
SHOW GRANTS FOR 'myuser'@'%';

-- Session configuration
SET SESSION group_concat_max_len = 1000000;
SET SESSION sql_mode = 'STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';

-- Query profiling
SET profiling = 1;
SELECT * FROM users WHERE email = 'test@example.com';
SHOW PROFILES;
SHOW PROFILE FOR QUERY 1;

-- Explain query execution plan
EXPLAIN FORMAT=JSON
SELECT u.id, u.email, COUNT(o.id) AS order_count
FROM users u
LEFT JOIN orders o ON o.user_id = u.id
WHERE u.status = 'active'
GROUP BY u.id, u.email
HAVING order_count > 5;

-- Safe update mode (prevents accidental mass updates)
SET SQL_SAFE_UPDATES = 1;

-- Output formatting
-- mysql --table (default tabular)
-- mysql --batch (tab-separated, no headers)
-- mysql --xml
-- mysql --html

-- Execute from command line
-- mysql -e "SELECT COUNT(*) FROM users" mydb
-- mysql < migration.sql
# ~/.my.cnf - MySQL client configuration
[mysql]
auto-rehash
prompt="\\u@\\h [\\d]> "
pager="less -SFX"
safe-updates
show-warnings

[client]
default-character-set=utf8mb4
# Connect to Redis
redis-cli -h localhost -p 6379

# Connect with authentication
redis-cli -h redis.example.com -p 6379 -a "$REDIS_PASSWORD"

# Connect to specific database
redis-cli -n 2

# Connect with TLS
redis-cli --tls --cert ./redis.crt --key ./redis.key \
  --cacert ./ca.crt -h redis.example.com
-- Redis interactive commands
PING
-- PONG
-- Key operations
SET user:1001 '{"name":"Alice","role":"admin"}'
GET user:1001
-- {"name":"Alice","role":"admin"}
DEL user:1001
EXISTS user:1001
-- (integer) 0
TYPE user:1001
TTL user:1001
-- Set with expiration
SETEX session:abc123 3600 '{"user_id":1001}'
TTL session:abc123
-- (integer) 3597
-- Key pattern search (use SCAN in production)
KEYS user:*
-- Warning: KEYS blocks Redis, use SCAN instead
SCAN 0 MATCH user:* COUNT 100
-- Hash operations
HSET user:1001 name "Alice" role "admin" login_count 42
HGET user:1001 name
HGETALL user:1001
HINCRBY user:1001 login_count 1
-- List operations
LPUSH notifications:1001 "New message from Bob"
LPUSH notifications:1001 "Order shipped"
LRANGE notifications:1001 0 -1
LLEN notifications:1001
-- Set operations
SADD online_users 1001 1002 1003
SMEMBERS online_users
SISMEMBER online_users 1001
SCARD online_users
-- Sorted set operations
ZADD leaderboard 100 "Alice" 85 "Bob" 92 "Charlie"
ZRANGE leaderboard 0 -1 WITHSCORES
ZREVRANGE leaderboard 0 2 WITHSCORES
ZSCORE leaderboard "Alice"
-- Server information
INFO server
INFO memory
INFO keyspace
INFO replication
DBSIZE
CONFIG GET maxmemory
CONFIG GET maxmemory-policy
-- Monitor commands in real-time (use sparingly)
MONITOR
-- Slow log analysis
SLOWLOG GET 10
SLOWLOG LEN
SLOWLOG RESET
-- Memory analysis
MEMORY USAGE user:1001
MEMORY DOCTOR
# IPython workflow for exploring API responsesimportrequestsimportjsonfrompprintimportpprint# Fetch and explore API dataresponse=requests.get("https://api.example.com/v1/users",headers={"Authorization":f"Bearer {token}"},)data=response.json()# Quick data inspectiontype(data)# dictdata.keys()# dict_keys(['users', 'pagination'])len(data["users"])# 50# Explore structure of first itempprint(data["users"][0])# {# 'id': 1001,# 'email': 'alice@example.com',# 'role': 'admin',# 'created_at': '2024-01-15T10:30:00Z',# 'metadata': {'department': 'engineering', 'team': 'platform'}# }# Quick analysis with list comprehensionsroles=[u["role"]foruindata["users"]]fromcollectionsimportCounterCounter(roles)# Counter({'user': 35, 'admin': 10, 'moderator': 5})# Filter and transformadmins=[{"email":u["email"],"team":u["metadata"].get("team")}foruindata["users"]ifu["role"]=="admin"]pprint(admins[:3])# Once exploration is done, formalize into a functiondefget_users_by_role(role,token):"""Fetch users filtered by role."""response=requests.get("https://api.example.com/v1/users",headers={"Authorization":f"Bearer {token}"},params={"role":role},)response.raise_for_status()returnresponse.json()["users"]# Save the refined function%saveuser_utils.py25-35
# Step 1: Prototype in REPL# IPython session - explore and iterate quicklyimporthashlibimportjson# Experiment with hashing approachesdata={"user_id":1001,"action":"login","timestamp":"2025-01-15T10:30:00Z"}payload=json.dumps(data,sort_keys=True)hash_value=hashlib.sha256(payload.encode()).hexdigest()print(hash_value[:16])# Short hash for deduplication# Try different serialization approaches# Test edge cases interactivelydata_with_none={"user_id":1001,"action":None}payload=json.dumps(data_with_none,sort_keys=True,default=str)print(payload)# Handles None -> "null"# Step 2: Formalize into a function (still in REPL)defcompute_event_hash(event:dict)->str:"""Compute deterministic hash for event deduplication."""payload=json.dumps(event,sort_keys=True,default=str)returnhashlib.sha256(payload.encode()).hexdigest()[:16]# Test in REPLassertcompute_event_hash(data)==compute_event_hash(data)assertcompute_event_hash({"a":1})!=compute_event_hash({"b":1})print("Tests pass")# Step 3: Save to file%save-fevent_utils.py8-12
# Step 4: Production module (event_utils.py after cleanup)
"""Event utility functions for deduplication."""
import hashlib
import json
from typing import Any


def compute_event_hash(event: dict[str, Any]) -> str:
    """Compute deterministic hash for event deduplication.

    Keys are sorted before serialization so logically-equal events
    hash identically regardless of insertion order; ``default=str``
    stringifies non-JSON-serializable values.

    Args:
        event: Dictionary containing event data.

    Returns:
        16-character hexadecimal hash string.
    """
    payload = json.dumps(event, sort_keys=True, default=str)
    return hashlib.sha256(payload.encode()).hexdigest()[:16]
# Step 5: Tests (test_event_utils.py)
"""Tests for event utility functions."""
import pytest

from event_utils import compute_event_hash


class TestComputeEventHash:
    """Tests for compute_event_hash."""

    def test_deterministic_output(self):
        """Same input produces same hash."""
        event = {"user_id": 1001, "action": "login"}
        assert compute_event_hash(event) == compute_event_hash(event)

    def test_different_input_different_hash(self):
        """Different inputs produce different hashes."""
        event_a = {"user_id": 1001, "action": "login"}
        event_b = {"user_id": 1002, "action": "login"}
        assert compute_event_hash(event_a) != compute_event_hash(event_b)

    def test_key_order_independent(self):
        """Hash is independent of key order."""
        event_a = {"action": "login", "user_id": 1001}
        event_b = {"user_id": 1001, "action": "login"}
        assert compute_event_hash(event_a) == compute_event_hash(event_b)

    def test_handles_none_values(self):
        """None values are handled correctly."""
        event = {"user_id": 1001, "action": None}
        result = compute_event_hash(event)
        assert isinstance(result, str)
        assert len(result) == 16

    def test_hash_length(self):
        """Hash output is exactly 16 characters."""
        event = {"key": "value"}
        assert len(compute_event_hash(event)) == 16
# NEVER type passwords directly in REPL commands

# Bad - password visible in shell history
psql -h db.example.com -U admin -p mypassword mydb
mysql -u admin -pmypassword mydb
redis-cli -a mypassword

# Good - use environment variables
export PGPASSWORD="$(vault read -field=password secret/db)"
psql -h db.example.com -U admin mydb

# Good - use password file
# ~/.pgpass format: hostname:port:database:username:password
echo "db.example.com:5432:mydb:admin:$(vault read -field=password secret/db)" \
  > ~/.pgpass
chmod 600 ~/.pgpass
psql -h db.example.com -U admin mydb

# Good - use MySQL option file
cat > ~/.my.cnf << 'MYSQL_CONF'
[client]
user = admin
password = "${DB_PASSWORD}"
MYSQL_CONF
chmod 600 ~/.my.cnf

# Good - use Redis AUTH with environment variable
redis-cli -h redis.example.com -a "$REDIS_AUTH_TOKEN"

# Good - use connection strings from secret manager
export DATABASE_URL="$(aws secretsmanager get-secret-value \
  --secret-id prod/db --query SecretString --output text | \
  jq -r '.connection_string')"
psql "$DATABASE_URL"
# Python - control history file permissions
chmod 600 ~/.python_history
chmod 600 ~/.ipython/profile_default/history.sqlite

# Disable history for sensitive sessions
# Python
PYTHONDONTWRITEBYTECODE=1 python3 -c "import readline; readline.clear_history()" -i

# IPython - disable history for session
ipython --HistoryManager.enabled=False

# Node.js - disable history
NODE_REPL_HISTORY="" node

# psql - disable history for session
PSQL_HISTORY=/dev/null psql -h db.example.com -U admin mydb

# MySQL - disable history
MYSQL_HISTFILE=/dev/null mysql -h db.example.com -u admin mydb

# Redis - commands with passwords are not logged by default
# But verify with CONFIG GET logfile

# Bash - temporarily disable history
set +o history
# ... sensitive commands ...
set -o history

# Clear specific history entries (bash)
history -d $(history | grep "password" | awk '{print $1}')
# IPython - exclude sensitive patterns from history
# ~/.ipython/profile_default/ipython_config.py
c.HistoryManager.enabled = True

# Custom input transformer to warn about secrets
# NOTE: removed unused `TransformerManager` import from the original snippet.
import re

SENSITIVE_PATTERNS = [
    r'password\s*=\s*["\'][^"\']+["\']',
    r'secret\s*=\s*["\'][^"\']+["\']',
    r'token\s*=\s*["\'][^"\']+["\']',
    r'api_key\s*=\s*["\'][^"\']+["\']',
    r'AWS_SECRET_ACCESS_KEY\s*=\s*["\'][^"\']+["\']',
]


def check_for_secrets(lines):
    """Warn if input contains potential secrets."""
    combined = "\n".join(lines)
    for pattern in SENSITIVE_PATTERNS:
        if re.search(pattern, combined, re.IGNORECASE):
            print("\033[91mWARNING: Input may contain secrets. "
                  "Use environment variables instead.\033[0m")
            break
    return lines
# rbac-debug-role.yaml
# Restrict interactive debugging to specific namespaces and actions
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: debug-shell-role
  namespace: staging
rules:
  # Allow exec into pods for debugging
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create"]
  # Allow ephemeral containers for debugging
  - apiGroups: [""]
    resources: ["pods/ephemeralcontainers"]
    verbs: ["patch"]
  # Allow viewing pod logs and status
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get", "list"]
  # Allow creating temporary debug pods
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete"]
    # Restrict to debug pods only
    resourceNames: ["debug-shell"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: debug-shell-binding
  namespace: staging
subjects:
  - kind: User
    name: developer@example.com
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: debug-shell-role
  apiGroup: rbac.authorization.k8s.io
# Bad - running exploratory queries against production
import boto3

session = boto3.Session(profile_name="production")
ec2 = session.client("ec2")
# Dangerous: could accidentally terminate instances
ec2.terminate_instances(InstanceIds=["i-0abc123"])

# Good - use read-only sessions for exploration
import boto3
from botocore.config import Config

read_only_config = Config(
    parameter_validation=True,
    retries={"max_attempts": 0},
)

# Use a read-only IAM role
session = boto3.Session(profile_name="production-readonly")
ec2 = session.client("ec2", config=read_only_config)

# Safe: only describe/list operations
instances = ec2.describe_instances()
-- Bad - running unguarded updates in database REPL
UPDATE users SET status = 'inactive';
-- Oops: forgot WHERE clause, updated ALL users

-- Good - use transactions and verify before committing
BEGIN;
UPDATE users SET status = 'inactive'
WHERE last_login < NOW() - INTERVAL '2 years';

-- Check affected rows before committing
SELECT COUNT(*) FROM users
WHERE last_login < NOW() - INTERVAL '2 years';
-- Returns: 42 (expected range)

-- Verify with a sample
SELECT id, email, status, last_login
FROM users
WHERE last_login < NOW() - INTERVAL '2 years'
LIMIT 5;

ROLLBACK;  -- Use ROLLBACK first, then COMMIT after verification
# Bad - credentials in REPL history
db_password = "super_secret_password_123"
api_key = "sk-abc123def456"
conn = psycopg2.connect(
    host="db.example.com",
    password="super_secret_password_123",
)

# Good - use environment variables or secret managers
import os

db_password = os.environ["DB_PASSWORD"]
api_key = os.environ["API_KEY"]
conn = psycopg2.connect(
    host="db.example.com",
    password=os.environ["DB_PASSWORD"],
)

# Good - use credential helpers
import keyring

db_password = keyring.get_password("mydb", "admin")
conn = psycopg2.connect(
    host="db.example.com",
    password=keyring.get_password("mydb", "admin"),
)
# Bad - copy-pasting REPL output into config files
# REPL session:
# >>> import json
# >>> config = {"workers": 4, "timeout": 30}
# >>> print(json.dumps(config))
# Then manually pasting into config.json

# Good - generate configuration files programmatically
import json
from pathlib import Path


def generate_config(environment: str) -> dict:
    """Generate environment-specific configuration.

    Unknown environments fall back to the base settings only.
    """
    base = {
        "log_level": "INFO",
        "metrics_enabled": True,
    }
    overrides = {
        "dev": {"workers": 2, "timeout": 60, "debug": True},
        "staging": {"workers": 4, "timeout": 30, "debug": False},
        "prod": {"workers": 8, "timeout": 15, "debug": False},
    }
    return {**base, **overrides.get(environment, {})}


# Generate and write config
config = generate_config("prod")
Path("config.json").write_text(json.dumps(config, indent=2, sort_keys=True))
# Bad - leaving database connections open after REPL session
import psycopg2

conn = psycopg2.connect(dsn="postgresql://localhost/mydb")
cursor = conn.cursor()
cursor.execute("SELECT * FROM users")
# ... explore data, then close terminal without cleanup

# Good - use context managers even in REPL sessions
import psycopg2

with psycopg2.connect(dsn="postgresql://localhost/mydb") as conn:
    with conn.cursor() as cursor:
        cursor.execute("SELECT * FROM users LIMIT 10")
        for row in cursor.fetchall():
            print(row)
# Connection automatically closed

# Good - register cleanup with atexit
import atexit
import psycopg2

conn = psycopg2.connect(dsn="postgresql://localhost/mydb")
atexit.register(conn.close)

# Now safe to explore interactively
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM users")
print(cursor.fetchone())
# Scenario: Pod keeps restarting in production

# Step 1: Identify the problem pod
kubectl get pods -n production -l app=payment-service
# NAME                              READY   STATUS             RESTARTS
# payment-service-7d8f9b6c5-x4k2n   0/1     CrashLoopBackOff   15

# Step 2: Check recent events
kubectl describe pod payment-service-7d8f9b6c5-x4k2n -n production | \
  grep -A 20 "Events:"
# Events:
#   Type     Reason   Age   Message
#   Warning  BackOff  2m    Back-off restarting failed container

# Step 3: Check logs from current and previous container
kubectl logs payment-service-7d8f9b6c5-x4k2n -n production --tail=50
kubectl logs payment-service-7d8f9b6c5-x4k2n -n production --previous --tail=50
# Error: FATAL: password authentication failed for user "payment_svc"

# Step 4: Verify the secret exists and is mounted
kubectl get secret payment-db-credentials -n production -o yaml
kubectl exec payment-service-7d8f9b6c5-x4k2n -n production -- \
  cat /var/run/secrets/db/password 2>/dev/null || \
  echo "Cannot read secret - container may not be running"

# Step 5: Test database connectivity from a debug pod
kubectl run db-debug --rm -it \
  --image=postgres:16 \
  --restart=Never \
  --namespace=production \
  --env="PGPASSWORD=$(kubectl get secret payment-db-credentials \
    -n production -o jsonpath='{.data.password}' | base64 -d)" \
  -- psql -h payment-db-service -U payment_svc -d payments -c "SELECT 1;"
# If this fails, the credentials in the secret are wrong

# Step 6: Check resource limits
kubectl top pod payment-service-7d8f9b6c5-x4k2n -n production
kubectl describe pod payment-service-7d8f9b6c5-x4k2n -n production | \
  grep -A 5 "Limits:"
# Limits:
#   cpu:     500m
#   memory:  256Mi  <-- May be too low

# Step 7: Check if the issue is OOMKilled
kubectl get pod payment-service-7d8f9b6c5-x4k2n -n production \
  -o jsonpath='{.status.containerStatuses[0].lastState.terminated.reason}'
# OOMKilled