ABSIndividual.py

import numpy as np
import ObjFunction


class ABSIndividual:

  '''
  individual of artificial bee swarm algorithm
  '''

  def __init__(self, vardim, bound):
    '''
    vardim: dimension of variables
    bound: boundaries of variables
    '''
    self.vardim = vardim
    self.bound = bound
    self.fitness = 0.
    self.trials = 0

  def generate(self):
    '''
    generate a random chromosome for the artificial bee swarm algorithm
    '''
    dim = self.vardim  # avoid shadowing the built-in len
    rnd = np.random.random(size=dim)
    self.chrom = np.zeros(dim)
    for i in range(0, dim):
      self.chrom[i] = self.bound[0, i] + (self.bound[1, i] - self.bound[0, i]) * rnd[i]

  def calculateFitness(self):
    '''
    calculate the fitness of the chromosome
    '''
    self.fitness = ObjFunction.GrieFunc(
      self.vardim, self.chrom, self.bound)
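
A quick sketch of how ABSIndividual can be exercised on its own (the 2 x vardim bound layout and the method names follow the class above; the ObjFunction module it imports is discussed at the end of this article):

import numpy as np
from ABSIndividual import ABSIndividual

# row 0 holds the lower bounds, row 1 the upper bounds, one column per variable
bound = np.tile([[-600], [600]], 5)
ind = ABSIndividual(5, bound)
ind.generate()            # random chromosome inside the bounds
ind.calculateFitness()    # fitness via ObjFunction.GrieFunc
print(ind.chrom, ind.fitness)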

ABS.py

import numpy as np
from ABSIndividual import ABSIndividual
import random
import copy
import matplotlib.pyplot as plt


class ArtificialBeeSwarm:

  '''
  the class for artificial bee swarm algorithm
  '''

  def __init__(self, sizepop, vardim, bound, MAXGEN, params):
    '''
    sizepop: population size
    vardim: dimension of variables
    bound: boundaries of variables
    MAXGEN: termination condition
    params: algorithm required parameters, a list consisting of [trialLimit, C]
    '''
    self.sizepop = sizepop
    self.vardim = vardim
    self.bound = bound
    self.foodSource = self.sizepop // 2  # integer division: half of the colony are employed bees
    self.MAXGEN = MAXGEN
    self.params = params
    self.population = []
    self.fitness = np.zeros((self.sizepop, 1))
    self.trace = np.zeros((self.MAXGEN, 2))

  def initialize(self):
    '''
    initialize the population of abs
    '''
    for i in range(0, self.foodSource):
      ind = ABSIndividual(self.vardim, self.bound)
      ind.generate()
      self.population.append(ind)

  def evaluation(self):
    '''
    evaluate the fitness of the population
    '''
    for i in range(0, self.foodSource):
      self.population[i].calculateFitness()
      self.fitness[i] = self.population[i].fitness

  def employedBeePhase(self):
    '''
    employed bee phase
    '''
    for i in range(0, self.foodSource):
      k = np.random.randint(0, self.vardim)        # dimension to perturb
      j = np.random.randint(0, self.foodSource)    # randomly chosen neighbour food source
      while j == i:
        j = np.random.randint(0, self.foodSource)
      vi = copy.deepcopy(self.population[i])
      # alternative gbest-guided update over all dimensions (uses C = self.params[1]):
      # vi.chrom = vi.chrom + np.random.uniform(-1, 1, self.vardim) * (
      #   vi.chrom - self.population[j].chrom) + np.random.uniform(0.0, self.params[1], self.vardim) * (self.best.chrom - vi.chrom)
      # for k in range(0, self.vardim):
      #   if vi.chrom[k] < self.bound[0, k]:
      #     vi.chrom[k] = self.bound[0, k]
      #   if vi.chrom[k] > self.bound[1, k]:
      #     vi.chrom[k] = self.bound[1, k]
      vi.chrom[k] += np.random.uniform(low=-1, high=1.0) * \
        (vi.chrom[k] - self.population[j].chrom[k])
      if vi.chrom[k] < self.bound[0, k]:
        vi.chrom[k] = self.bound[0, k]
      if vi.chrom[k] > self.bound[1, k]:
        vi.chrom[k] = self.bound[1, k]
      vi.calculateFitness()
      if vi.fitness > self.fitness[i]:
        self.population[i] = vi
        self.fitness[i] = vi.fitness
        if vi.fitness > self.best.fitness:
          self.best = vi
      else:
        self.population[i].trials += 1

  def onlookerBeePhase(self):
    '''
    onlooker bee phase
    '''
    accuFitness = np.zeros((self.foodSource, 1))
    maxFitness = np.max(self.fitness)

    for i in range(0, self.foodSource):
      accuFitness[i] = 0.9 * self.fitness[i] / maxFitness + 0.1

    for i in range(0, self.foodSource):
      for fi in range(0, self.foodSource):
        r = random.random()
        if r < accuFitness[i]:
          k = np.random.randint(0, self.vardim)
          j = np.random.randint(0, self.foodSource)
          while j == fi:
            j = np.random.randint(0, self.foodSource)
          vi = copy.deepcopy(self.population[fi])
          # alternative gbest-guided update over all dimensions (uses C = self.params[1]):
          # vi.chrom = vi.chrom + np.random.uniform(-1, 1, self.vardim) * (
          #   vi.chrom - self.population[j].chrom) + np.random.uniform(0.0, self.params[1], self.vardim) * (self.best.chrom - vi.chrom)
          # for k in range(0, self.vardim):
          #   if vi.chrom[k] < self.bound[0, k]:
          #     vi.chrom[k] = self.bound[0, k]
          #   if vi.chrom[k] > self.bound[1, k]:
          #     vi.chrom[k] = self.bound[1, k]
          vi.chrom[k] += np.random.uniform(low=-1, high=1.0) * \
            (vi.chrom[k] - self.population[j].chrom[k])
          if vi.chrom[k] < self.bound[0, k]:
            vi.chrom[k] = self.bound[0, k]
          if vi.chrom[k] > self.bound[1, k]:
            vi.chrom[k] = self.bound[1, k]
          vi.calculateFitness()
          if vi.fitness > self.fitness[fi]:
            self.population[fi] = vi
            self.fitness[fi] = vi.fitness
            if vi.fitness > self.best.fitness:
              self.best = vi
          else:
            self.population[fi].trials += 1
          break

  def scoutBeePhase(self):
    '''
    scout bee phase
    '''
    for i in range(0, self.foodSource):
      if self.population[i].trials > self.params[0]:
        self.population[i].generate()
        self.population[i].trials = 0
        self.population[i].calculateFitness()
        self.fitness[i] = self.population[i].fitness

  def solve(self):
    '''
    the evolution process of the abs algorithm
    '''
    self.t = 0
    self.initialize()
    self.evaluation()
    best = np.max(self.fitness)
    bestIndex = np.argmax(self.fitness)
    self.best = copy.deepcopy(self.population[bestIndex])
    self.avefitness = np.mean(self.fitness)
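    # the trace stores objective values recovered from the fitness; this assumes
    # GrieFunc returns fitness = 1 / (1 + f), so that (1 - fitness) / fitness = f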
    self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
    self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
    print("Generation %d: optimal function value is: %f; average function value is %f" % (
      self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
    while self.t < self.MAXGEN - 1:
      self.t += 1
      self.employedBeePhase()
      self.onlookerBeePhase()
      self.scoutBeePhase()
      best = np.max(self.fitness)
      bestIndex = np.argmax(self.fitness)
      if best > self.best.fitness:
        self.best = copy.deepcopy(self.population[bestIndex])
      self.avefitness = np.mean(self.fitness)
      self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
      self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
      print("Generation %d: optimal function value is: %f; average function value is %f" % (
        self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
    print("Optimal function value is: %f; " % self.trace[self.t, 0])
    print "Optimal solution is:"
    print self.best.chrom
    self.printResult()

  def printResult(self):
    '''
    plot the result of abs algorithm
    '''
    x = np.arange(0, self.MAXGEN)
    y1 = self.trace[:, 0]
    y2 = self.trace[:, 1]
    plt.plot(x, y1, 'r', label='optimal value')
    plt.plot(x, y2, 'g', label='average value')
    plt.xlabel("Iteration")
    plt.ylabel("function value")
    plt.title("Artificial Bee Swarm algorithm for function optimization")
    plt.legend()
    plt.show()

Running the program:

if __name__ == "__main__":

  # appended at the end of ABS.py, so np and ArtificialBeeSwarm are in scope
  # row 0: lower bounds, row 1: upper bounds, one column per variable
  bound = np.tile([[-600], [600]], 25)
  abs_solver = ArtificialBeeSwarm(60, 25, bound, 1000, [100, 0.5])
  abs_solver.solve()

For the ObjFunction module, see the simple genetic algorithm Python implementation article.
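
The ObjFunction module is not reproduced here. As a rough guide, a compatible GrieFunc could look like the sketch below, assuming the Griewank function mapped to a fitness in (0, 1] via 1 / (1 + f), which matches the (1 - fitness) / fitness back-transform used in solve; the version in the referenced genetic algorithm article may differ in detail.

import numpy as np

def GrieFunc(vardim, x, bound):
  # bound is accepted to match the call in ABSIndividual.calculateFitness but is not used here
  # Griewank value: f(x) = sum(x_i^2) / 4000 - prod(cos(x_i / sqrt(i))) + 1, minimum 0 at x = 0
  s = np.sum(x ** 2) / 4000.0
  p = np.prod(np.cos(x / np.sqrt(np.arange(1, vardim + 1))))
  f = s - p + 1.0
  # map to a maximization fitness in (0, 1]; an assumption, not necessarily the original code
  return 1.0 / (1.0 + f)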

This concludes the detailed walkthrough of implementing the artificial bee swarm algorithm in Python. For more material on the artificial bee swarm algorithm in Python, please see the other related articles.


《强袭风暴》不是普通的战场,作为一个独立于主游戏之外的活动,玩家可以用大逃杀的风格来体验《魔兽世界》,不分职业、不分装备(除了你在赛局中捡到的),光是技巧和战略的强弱之分就能决定出谁才是能坚持到最后的赢家。本次活动将会开放单人和双人模式,玩家在加入海盗主题的预赛大厅区域前,可以从强袭风暴角色画面新增好友。游玩游戏将可以累计名望轨迹,《巨龙崛起》和《魔兽世界:巫妖王之怒 经典版》的玩家都可以获得奖励。