A Python example of a Markov reward process

import numpy as np

# Set of states
states = ["Rainy", "Sunny"]

# Set of actions
actions = ["Stay", "Go_out"]

# Transition probability matrix: rows are the current state, columns the next state
transition_probabilities = [
    [0.7, 0.3],
    [0.4, 0.6]
]

# Reward function: rewards[state][action]
rewards = [
    [0, 0],
    [5, 0]
]

# Choose the action with the highest immediate reward in the current state.
# Transitions here do not depend on the action, so this greedy choice is optimal.
def get_optimal_action(state):
    state_index = states.index(state)
    return actions[int(np.argmax(rewards[state_index]))]

# Transition probability P(next_state | current_state).
# In this simplified example the transition does not depend on the chosen action.
def get_transition_probability(current_state, next_state, action):
    current_state_index = states.index(current_state)
    next_state_index = states.index(next_state)
    return transition_probabilities[current_state_index][next_state_index]

# Immediate reward R(current_state, action)
def get_reward(current_state, action):
    current_state_index = states.index(current_state)
    action_index = actions.index(action)
    return rewards[current_state_index][action_index]

# Initial state
current_state = "Rainy"

# Accumulated reward
total_reward = 0

# Simulate 5 decision steps
for i in range(5):
    action = get_optimal_action(current_state)   # pick an action for the current state
    reward = get_reward(current_state, action)   # collect the immediate reward
    total_reward += reward
    # Sample the next state from the current state's row of the transition matrix
    next_state = np.random.choice(states, p=transition_probabilities[states.index(current_state)])
    current_state = next_state

print("Total reward:", total_reward)
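
Because the transitions above do not depend on the chosen action, the example can also be read as a plain Markov reward process, whose state values satisfy the Bellman equation V = R + gamma * P * V and can therefore be solved in closed form as V = (I - gamma * P)^-1 * R. The sketch below does that for this example; the discount factor gamma = 0.9 is an illustrative assumption (not part of the original code), and R is taken as the immediate reward each state yields under the policy defined by get_optimal_action.

# A minimal sketch: closed-form state values of the Markov reward process,
# assuming a discount factor gamma = 0.9 (an illustrative choice, not from the original example)
gamma = 0.9

P = np.array(transition_probabilities)
# Expected immediate reward per state under the policy defined by get_optimal_action
R = np.array([get_reward(s, get_optimal_action(s)) for s in states])

# Bellman equation V = R + gamma * P @ V  =>  (I - gamma * P) @ V = R
V = np.linalg.solve(np.eye(len(states)) - gamma * P, R)

for state, value in zip(states, V):
    print(f"V({state}) = {value:.2f}")

Under these assumptions Sunny ends up with a higher value than Rainy, since it is the only state that yields a positive immediate reward under this policy.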
           
