2021
Alexander Dockhorn and Rudolf Kruse.
Modelheuristics for efficient forward model learning.
In: at-Automatisierungstechnik, 69(10), 848–857.
2021. De Gruyter. https://doi.org/10.1515/auto-2021-0037
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Forward model learning, i. e., learning forward models from data, finds application in prediction-based control. This involves observing inputs and outputs of the system to build a transition model and make predictions about future time steps. In particular, complex state spaces require the use of specialized search and model building techniques. In this work, we present abstraction heuristics for high-dimensional state spaces, which allow us to reduce the model complexity and, in many cases, yield an interpretable result. In two case studies, we demonstrate the effectiveness of the presented procedure in the context of artificial intelligence in games and motion control scenarios. The transfer of these methods enables promising applications in automation engineering.
BibTeX:
@article{DockhornAT2021,
abstract = {Forward model learning, i. e., learning forward models from data, finds application in prediction-based control. This involves observing inputs and outputs of the system to build a transition model and make predictions about future time steps. In particular, complex state spaces require the use of specialized search and model building techniques. In this work, we present abstraction heuristics for high-dimensional state spaces, which allow to reduce the model complexity and, in many cases, yield an interpretable result. In the context of two case studies, we demonstrate the effectiveness of the presented procedure in the context of artificial intelligence in games and motion control scenarios. The transfer of these methods enables promising applications in automation engineering.},
author = {Dockhorn, Alexander and Kruse, Rudolf},
doi = {10.1515/auto-2021-0037},
issn = {2196677X},
journal = {at-Automatisierungstechnik},
keywords = {autonomous control,decomposition of forward models,forward model learning,local forward model,object-based forward model},
number = {10},
pages = {848--857},
title = {{Modelheuristics for efficient forward model learning}},
volume = {69},
year = {2021}
}
Alexander Dockhorn, Jorge Hurtado-Grueso, Dominik Jeurissen, Linjie Xu, and Diego Perez-Liebana.
Game State and Action Abstracting Monte Carlo Tree Search for General Strategy Game-Playing.
In: Proceedings of the 2021 IEEE Conference on Games (CoG),
2021. IEEE.
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
When implementing intelligent agents for strategy games, we observe that search-based methods struggle with the complexity of such games. To tackle this problem, we propose a new variant of Monte Carlo Tree Search which can incorporate action and game state abstractions. Focusing on the latter, we developed a game state encoding for turn-based strategy games that allows for a flexible abstraction. Using an optimization procedure, we optimize the agent's action and game state abstraction to maximize its performance against a rule-based agent. Furthermore, we compare different combinations of abstractions and their impact on the agent's performance based on the Kill the King game of the STRATEGA framework. Our results show that action abstractions have improved the performance of our agent considerably. In contrast, game state abstractions have not shown much impact. While these results may be limited to the tested game, they are in line with previous research on abstractions of simple Markov Decision Processes. The higher complexity of strategy games may require more intricate methods, such as hierarchical or time-based abstractions, to further improve the agent's performance.
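Illustrative sketch of the game state abstraction idea (not the paper's implementation; the state fields and bucket sizes are invented for this example): concrete states are mapped to coarser abstract keys, so an abstracting MCTS can share node statistics between states that fall into the same class.

```python
# Hypothetical sketch: bucket selected state features so that several
# concrete game states share one abstract key (and thus one tree node).
from dataclasses import dataclass

@dataclass(frozen=True)
class State:               # invented feature set for illustration
    own_units: int
    enemy_units: int
    gold: int
    king_hp: int

def abstract_key(s: State, gold_bucket: int = 50, hp_bucket: int = 25) -> tuple:
    # Keep unit counts exact, coarsen gold and king health into buckets.
    return (s.own_units, s.enemy_units, s.gold // gold_bucket, s.king_hp // hp_bucket)

a = State(own_units=3, enemy_units=2, gold=120, king_hp=87)
b = State(own_units=3, enemy_units=2, gold=140, king_hp=99)
assert abstract_key(a) == abstract_key(b)   # both land in the same abstract node

# An abstracting MCTS would accumulate visit counts and rewards per key:
node_stats = {}
for state, reward in [(a, 1.0), (b, 0.0)]:
    key = abstract_key(state)
    visits, total = node_stats.get(key, (0, 0.0))
    node_stats[key] = (visits + 1, total + reward)
print(node_stats)   # {(3, 2, 2, 3): (2, 1.0)}
```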
BibTeX:
@inproceedings{Dockhorn2021,
abstract = {When implementing intelligent agents for strategy games, we observe that search-based methods struggle with the complexity of such games. To tackle this problem, we propose a new variant of Monte Carlo Tree Search which can incorporate action and game state abstractions. Focusing on the latter, we developed a game state encoding for turn-based strategy games that allows for a flexible abstraction. Using an optimization procedure, we optimize the agent's action and game state abstraction to maximize its performance against a rule-based agent. Furthermore, we compare different combinations of abstractions and their impact on the agent's performance based on the Kill the King game of the STRATEGA framework. Our results show that action abstractions have improved the performance of our agent considerably. Contrary, game state abstractions have not shown much impact. While these results may be limited to the tested game, they are in line with previous research on abstractions of simple Markov Decision Processes. The higher complexity of strategy games may require more intricate methods, such as hierarchical or time-based abstractions, to further improve the agent's performance.},
author = {Dockhorn, Alexander and Hurtado-Grueso, Jorge and Jeurissen, Dominik and Xu, Linjie and Perez-Liebana, Diego},
booktitle = {2021 IEEE Conference on Games (CoG)},
doi = {10.1109/CoG52621.2021.9619029},
isbn = {978-1-6654-3886-5},
keywords = {Game State Abstraction,General Strategy Game-playing,Index Terms-Action Abstraction,Monte Carlo Tree Search,N-Tuple Bandit Evolutionary Algorithm,Stratega},
month = {aug},
pages = {1--8},
publisher = {IEEE},
title = {{Game State and Action Abstracting Monte Carlo Tree Search for General Strategy Game-Playing}},
url = {https://gaigresearch.github.io/afm/ https://ieeexplore.ieee.org/document/9619029/},
year = {2021}
}
Alexander Dockhorn, Jorge Hurtado-Grueso, Dominik Jeurissen, Linjie Xu, and Diego Perez-Liebana.
Portfolio Search and Optimization for General Strategy Game-Playing.
In: Proceedings of the Congress on Evolutionary Computation (CEC),
2021 (to be published). IEEE.
[Abstract]
[PDF]
Abstract:
Portfolio methods represent a simple but efficient type of action abstraction which has been shown to improve the performance of search-based agents in a range of strategy games. We first review existing portfolio techniques and propose a new algorithm for optimization and action-selection based on the Rolling Horizon Evolutionary Algorithm. Moreover, a series of variants is developed to address different aspects of the problem. We further analyze the performance of the discussed agents in a general strategy game-playing task. For this purpose, we run experiments on three different game-modes of the Stratega framework. For the optimization of the agents' parameters and portfolio sets we study the use of the N-tuple Bandit Evolutionary Algorithm. The resulting portfolio sets suggest a high diversity in play-styles while being able to consistently beat the sample agents. An analysis of the agents' performance shows that the proposed algorithm generalizes well to all game-modes and is able to outperform other portfolio methods.
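A minimal sketch of the portfolio idea (the scripts, state fields, and evaluation below are placeholders, not the paper's agents): the search operates over sequences of scripts rather than raw actions, which shrinks the branching factor considerably.

```python
# Hypothetical sketch of a portfolio as an action abstraction: instead of
# searching over all low-level actions, the agent searches over a small
# set of scripts, each of which maps a game state to a concrete action.
import random

def attack_closest(state):   # placeholder scripts standing in for real ones
    return ("attack", min(state["enemy_dist"]))

def retreat(state):
    return ("move", -1)

def expand_economy(state):
    return ("build", "worker")

PORTFOLIO = [attack_closest, retreat, expand_economy]

def rollout_value(state, script_sequence, horizon=3):
    # Toy evaluation: pretend each script changes a scalar score.
    score = state["score"]
    for script in script_sequence[:horizon]:
        action = script(state)
        score += 1.0 if action[0] == "attack" else 0.2
    return score

def best_script_sequence(state, samples=100, horizon=3):
    # Random search over script sequences (a stand-in for RHEA or MCTS).
    best, best_value = None, float("-inf")
    for _ in range(samples):
        seq = [random.choice(PORTFOLIO) for _ in range(horizon)]
        value = rollout_value(state, seq, horizon)
        if value > best_value:
            best, best_value = seq, value
    return [s.__name__ for s in best], best_value

print(best_script_sequence({"score": 0.0, "enemy_dist": [4, 7, 2]}))
```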
2020
Alexander Dockhorn, Chris Saxton, and Rudolf Kruse.
Association Rule Mining for Unknown Video Games.
In: M.-J. Lesot & C. Marsala (Eds.), Fuzzy Approaches for Soft Computing and Approximate Reasoning: Theories and Applications,
2020. (pp. 257–270). Springer Cham.
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Computational intelligence agents can reach expert levels in many known games,
such as Chess, Go, and Morris. Those systems incorporate powerful machine learning
algorithms, which are able to learn from, e.g., observations, play-traces, or by
reinforcement learning. While many black box systems, such as deep neural networks,
are able to achieve high performance in a wide range of applications, they generally
lack interpretability. Additionally, previous systems often focused on a single or a
small set of games, which makes it a cumbersome task to rebuild and retrain the
agent for each possible application. This paper proposes a method, which extracts
an interpretable set of game rules for previously unknown games. Frequent pattern
mining is used to find common observation patterns in the game environment. Finally,
game rules as well as winning-/losing-conditions are extracted via association
rule analysis. Our evaluation shows that a wide range of game rules can be
successfully extracted from previously unknown games. We further highlight how the
application of fuzzy methods can advance our efforts in generating explainable
artificial intelligence (AI) agents.
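To make the extraction step concrete, here is a minimal, self-contained sketch of frequent-itemset counting and rule confidence on invented observation transactions; the paper's actual pipeline uses a full frequent pattern mining algorithm and fuzzy extensions, which this toy example does not reproduce.

```python
# Minimal sketch of frequent-itemset counting and association rule
# confidence on toy "observation" transactions from a game environment.
from itertools import combinations
from collections import Counter

# Each transaction lists symbolic observations from one game tick.
transactions = [
    {"avatar_touches_key", "key_disappears", "score_up"},
    {"avatar_touches_key", "key_disappears"},
    {"avatar_touches_wall"},
    {"avatar_touches_key", "key_disappears", "score_up"},
]

def frequent_itemsets(transactions, min_support=0.5, max_size=2):
    counts = Counter()
    for t in transactions:
        for size in range(1, max_size + 1):
            for itemset in combinations(sorted(t), size):
                counts[itemset] += 1
    n = len(transactions)
    return {s: c / n for s, c in counts.items() if c / n >= min_support}

def confidence(antecedent, consequent, transactions):
    has_a = [t for t in transactions if antecedent <= t]
    return sum(1 for t in has_a if consequent <= t) / len(has_a)

print(frequent_itemsets(transactions))
# Candidate game rule: touching a key makes the key disappear (confidence 1.0).
print(confidence({"avatar_touches_key"}, {"key_disappears"}, transactions))
```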
BibTeX:
@incollection{Dockhorn2018a-association-rule-mining,
author = {Dockhorn, Alexander and Saxton, Chris and Kruse, Rudolf},
booktitle = {Fuzzy Approaches for Soft Computing and Approximate Reasoning: Theories and Applications},
editor = {Lesot, Marie-Jeanne and Marsala, Christophe},
keywords = {action selection,association rule analysis,frequent pattern mining,planning,subgoal induction},
pages = {257--270},
publisher = {Springer Cham},
title = {{Association Rule Mining for Unknown Video Games}},
year = {2020}
}
Alexander Dockhorn, Jorge Hurtado-Grueso, Dominik Jeurissen, and Diego Perez-Liebana.
“Stratega”: A General Strategy Games Framework.
In: J. C. Osborn (Ed.), Joint Proceedings of the AIIDE 2020 Workshops co-located with 16th AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 2020), (pp. 1–7) CEUR Workshop Proceedings (2020). http://ceur-ws.org/Vol-2862/
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
Strategy games are complex environments often used in AI research to evaluate
new algorithms. Despite the commonalities of most strategy games, often research
is focused on one game only, which may lead to bias or overfitting to a particular
environment. In this paper, we motivate and present STRATEGA - a general strategy
games framework for playing n-player turn-based and real-time strategy games.
The platform currently implements turn-based games, which can be configured via
YAML-files. It exposes an API with access to a forward model to facilitate research
on statistical forward planning agents. The framework and agents can log
information during games for analysing and debugging algorithms. We also present
some sample rule-based agents, as well as search-based agents like Monte Carlo
Tree Search and Rolling Horizon Evolution, and quantitatively analyse their
performance to demonstrate the use of the framework. Results, although purely
illustrative, show the known problems that traditional search-based agents have
when dealing with high branching factors in these games. STRATEGA can be downloaded at:
https://github.research.its.qmul.ac.uk/eecsgameai/Stratega
BibTeX:
@inproceedings{Dockhorn2020,
abstract = {Stratega, a general strategy games framework, has been designed to foster research on computational intelligence for strategy games. In contrast to other strategy game frameworks, Stratega allows to create a wide variety of turn-based and real-time strategy games using a common API for agent development. While the current version supports the development of turn-based strategy games and agents, we will add support for real-time strategy games in future updates. Flexibility is achieved by utilising YAML-files to configure tiles, units, actions, and levels. Therefore, the user can design and run a variety of games to test developed agents without specifically adjusting it to the game being generated. The framework has been built with a focus of statistical forward planning (SFP) agents. For this purpose, agents can access and modify game-states and use the forward model to simulate the outcome of their actions. While SFP agents have shown great flexibility in general game-playing, their performance is limited in case of complex state and action-spaces. Finally, we hope that the development of this framework and its respective agents helps to better understand the complex decision-making process in strategy games. Stratega can be downloaded at: https://github.research.its.qmul.ac.uk/eecsgameai/Stratega},
address = {Worcester},
archivePrefix = {arXiv},
arxivId = {2009.05643},
author = {Dockhorn, Alexander and Grueso, Jorge Hurtado and Jeurissen, Dominik and Perez-Liebana, Diego},
booktitle = {Joint Proceedings of the AIIDE 2020 Workshops co-located with 16th AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 2020)},
editor = {Osborn, Joseph C.},
eprint = {2009.05643},
pages = {1--7},
publisher = {CEUR Workshop Proceedings},
title = {{"Stratega": A General Strategy Games Framework}},
url = {http://ceur-ws.org/Vol-2862/},
year = {2020}
}
Raluca D. Gaina, Martin Balla, Alexander Dockhorn, Raúl Montoliu, and Diego Perez-Liebana.
TAG: A Tabletop Games Framework.
In: J. C. Osborn (Ed.), Joint Proceedings of the AIIDE 2020 Workshops co-located with 16th AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 2020), (pp. 1–7) CEUR Workshop Proceedings (2020). http://ceur-ws.org/Vol-2862/
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Tabletop games come in a variety of forms, including board games, card games,
and dice games. In recent years, their complexity has considerably increased,
with many components, rules that change dynamically through the game, diverse player
roles, and a series of control parameters that influence a game's balance. As such,
they also encompass novel and intricate challenges for Artificial Intelligence
methods, yet research largely focuses on classical board games such as
chess and Go. We introduce in this work the Tabletop Games (TAG)
framework, which promotes research into general AI in modern tabletop games,
facilitating the implementation of new games and AI players, while providing
analytics to capture the complexities of the challenges proposed. We include
preliminary results with sample AI players, showing some moderate success, with
plenty of room for improvement, and discuss further developments and new research
directions.
BibTeX:
@inproceedings{Gaina2020,
abstract = {Tabletop games come in a variety of forms, including board games, card games, and dice games. In recent years, their complexity has considerably increased, with many components, rules that change dynamically through the game, diverse player roles, and a series of control parameters that influence a game's balance. As such, they also encompass novel and intricate challenges for Artificial Intelligence methods, yet research largely focuses on classical board games such as \textit{chess} and \textit{Go}. We introduce in this work the Tabletop Games (TAG) framework, which promotes research into general AI in modern tabletop games, facilitating the implementation of new games and AI players, while providing analytics to capture the complexities of the challenges proposed. We include preliminary results with sample AI players, showing some moderate success, with plenty of room for improvement, and discuss further developments and new research directions.},
address = {Worcester},
author = {Gaina, Raluca D and Balla, Martin and Dockhorn, Alexander and Montoliu, Ra{\'{u}}l and Perez-Liebana, Diego},
booktitle = {Joint Proceedings of the AIIDE 2020 Workshops co-located with 16th AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 2020)},
editor = {Osborn, Joseph C.},
pages = {1--7},
publisher = {CEUR Workshop Proceedings},
title = {{TAG: A Tabletop Games Framework}},
url = {http://ceur-ws.org/Vol-2862/},
year = {2020}
}
Diego Perez-Liebana, Alexander Dockhorn, Jorge Hurtado, and Dominik Jeurissen.
The Design Of “Stratega”: A General Strategy Games Framework.
2020. http://arxiv.org/abs/2009.05643
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Stratega, a general strategy games framework, has been designed to foster research
on computational intelligence for strategy games. In contrast to other strategy game
frameworks, Stratega allows users to create a wide variety of turn-based and real-time
strategy games using a common API for agent development. While the current version
supports the development of turn-based strategy games and agents, we will add support
for real-time strategy games in future updates. Flexibility is achieved by utilising
YAML-files to configure tiles, units, actions, and levels. Therefore, the user can
design and run a variety of games to test developed agents without specifically
adjusting it to the game being generated. The framework has been built with a focus
on statistical forward planning (SFP) agents. For this purpose, agents can access
and modify game-states and use the forward model to simulate the outcome of their
actions. While SFP agents have shown great flexibility in general game-playing, their
performance is limited in the case of complex state and action spaces. Finally, we hope
that the development of this framework and its respective agents helps to better
understand the complex decision-making process in strategy games. Stratega can be
downloaded at: https://github.com/GAIGResearch/Stratega
BibTeX:
@misc{2009.05643,
Author = {Diego Perez-Liebana and Alexander Dockhorn and Jorge Hurtado Grueso and Dominik Jeurissen},
Title = {The Design Of "Stratega": A General Strategy Games Framework},
Year = {2020},
Eprint = {arXiv:2009.05643},
}
Raluca D. Gaina, Martin Balla, Alexander Dockhorn, Raul Montoliu, and Diego Perez-Liebana.
Design and Implementation of TAG: A Tabletop Games Framework.
2020. http://arxiv.org/abs/2009.12065
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
This document describes the design and implementation of the Tabletop Games framework (TAG),
a Java-based benchmark for developing modern board games for AI research.
TAG provides a common skeleton for implementing tabletop games based on a common
API for AI agents, a set of components and classes to easily add new games and an
import module for defining data in JSON format. At present, this platform includes
the implementation of seven different tabletop games that can also be used as an
example for further developments. Additionally, TAG also incorporates logging
functionality that allows the user to perform a detailed analysis of the game, in
terms of action space, branching factor, hidden information, and other measures of
interest for Game AI research. The objective of this document is to serve as a central
point where the framework can be described at length. TAG can be downloaded at:
https://github.com/GAIGResearch/TabletopGames
BibTeX:
@misc{2009.12065,
Author = {Raluca D. Gaina and Martin Balla and Alexander Dockhorn and Raul Montoliu and Diego Perez-Liebana},
Title = {Design and Implementation of TAG: A Tabletop Games Framework},
Year = {2020},
Eprint = {arXiv:2009.12065},
}
Alexander Dockhorn and Rudolf Kruse.
Forward Model Learning for Motion Control Tasks.
In: IEEE 10th International Conference on Intelligent Systems (IS),
2020.
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
In this work, we study the capabilities and limitations of forward model
learning agents and their applications to motion-control tasks. Forward
model learning agents learn to approximate the environment dynamics to
apply planning algorithms for action-selection. While previous work has
shown that forward model learning agents can efficiently learn to play
simple video games, we extend their applicability to domains with continuous
state and action spaces. Our experiments show that such agents are quickly
able to learn an approximate model of their environment, which suffices to
solve several simple motion-control tasks. Comparisons with deep reinforcement
learning further highlight the sample efficiency of forward model learning agents.
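For illustration, a compact sketch of the general recipe (toy dynamics and planner, not the paper's setup): collect transitions, fit a model of s' = f(s, a), and plan by simulating candidate action sequences through the learned model. scikit-learn's k-NN regressor stands in for whichever model class the agent actually uses.

```python
# Illustrative sketch: learn a forward model s' = f(s, a) from observed
# transitions with a k-NN regressor and use it for random-shooting planning.
import numpy as np
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.default_rng(0)

def true_dynamics(s, a):
    # Toy 1D point mass: state = (position, velocity), action = force.
    pos, vel = s
    vel = vel + 0.1 * a
    return np.array([pos + 0.1 * vel, vel])

# Collect random interaction data.
X, y = [], []
s = np.zeros(2)
for _ in range(500):
    a = rng.uniform(-1, 1)
    s_next = true_dynamics(s, a)
    X.append(np.append(s, a))
    y.append(s_next)
    s = s_next if abs(s_next[0]) < 5 else np.zeros(2)

model = KNeighborsRegressor(n_neighbors=5).fit(np.array(X), np.array(y))

def plan(s, candidates=64, horizon=10):
    # Return the first action of the sampled rollout that ends closest to x = 1.
    best_a, best_cost = 0.0, np.inf
    for _ in range(candidates):
        actions = rng.uniform(-1, 1, horizon)
        sim = s.copy()
        for a in actions:
            sim = model.predict(np.append(sim, a).reshape(1, -1))[0]
        cost = abs(sim[0] - 1.0)
        if cost < best_cost:
            best_a, best_cost = actions[0], cost
    return best_a

print("chosen action:", plan(np.zeros(2)))
```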
BibTeX:
@inproceedings{DockhornMotionControl2020,
author = {Dockhorn, Alexander and Kruse, Rudolf},
isbn = {9781728154565},
keywords = {Forward Model Learning; Deep Reinforcement Learning; Motion Control Tasks},
pages = {1--5},
title = {{Forward Model Learning for Motion Control Tasks}},
year = {2020},
note = {accepted for publication}
}
Alexander Dockhorn and Rudolf Kruse.
Balancing Exploration And Exploitation in Forward Model Learning.
In: Advances in Intelligent Systems Research and Innovation,
(to be published).
[Abstract]
[BibTeX]
[PDF]
Abstract:
Forward model learning algorithms enable the application of simulation-based
search methods in environments for which the forward model is unknown.
Multiple studies have shown great performance in game-related and motion control
applications. In these, forward model learning agents often required less training time
while achieving a similar performance to that of state-of-the-art reinforcement learning
methods. However, several problems can emerge when replacing the environment’s
true model with a learned approximation. While the true forward model allows the
accurate prediction of future time-steps, a learned forward model may always be
inaccurate in its prediction. These inaccuracies become problematic when planning
long action sequences since the confidence in predicted time-steps reduces with
increasing depth of the simulation. In this work, we explore methods for balancing
risk and reward in decision-making using inaccurate forward models. Therefore, we
propose methods for measuring the variance of a forward model and the confidence
in the predicted outcome of planned action sequences. Based on these metrics, we
define methods for learning and using forward models under consideration of their
current prediction accuracy. Proposed methods have been tested in various motion
control tasks of the Open AI Gym framework. Results show that the information on
the model’s accuracy can be used to increase the efficiency of the agent’s training
and the agent’s performance during evaluation.
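A toy sketch of the underlying intuition (the numbers and the exponential-decay form are illustrative assumptions, not the chapter's exact confidence measure): if the learned model is right with probability p per simulated step, the trust in a prediction at depth d shrinks like p^d, and rewards found deep in a rollout should be discounted accordingly.

```python
# Hedged sketch: discount a rollout's predicted reward by the forward
# model's estimated per-step accuracy, so long plans through an uncertain
# model are trusted less than short ones.

def discounted_plan_value(step_rewards, model_accuracy):
    """Weight each predicted reward by the probability that the
    simulated trajectory is still correct at that depth."""
    value, trust = 0.0, 1.0
    for r in step_rewards:
        trust *= model_accuracy      # confidence shrinks with depth
        value += trust * r
    return value

accurate_model = discounted_plan_value([1, 1, 1, 1, 1], model_accuracy=0.99)
shaky_model = discounted_plan_value([1, 1, 1, 1, 1], model_accuracy=0.70)
print(round(accurate_model, 3), round(shaky_model, 3))
# With a shaky model, a shorter but safer plan can now win the comparison:
print(discounted_plan_value([1, 1], model_accuracy=0.70) >
      discounted_plan_value([0, 0, 0, 1, 1], model_accuracy=0.70))
```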
BibTeX:
@incollection{Dockhorn2020Balancing,
author = {Dockhorn, Alexander and Kruse, Rudolf},
booktitle = {Advances in Intelligent Systems Research and Innovation},
pages = {1--20},
publisher = {Elsevier},
title = {{Balancing Exploration And Exploitation in Forward Model Learning}},
year = {2020}
}
Alexander Dockhorn and Simon Lucas.
Local Forward Model Learning for GVGAI Games.
In: Proceedings of the IEEE Conference on Games, 2020.
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
In this work, we study the capabilities and limitations of forward model
learning agents and their applications to motion-control tasks. Forward
model learning agents learn to approximate the environment dynamics to
apply planning algorithms for action-selection. While previous work has
shown that forward model learning agents can efficiently learn to play
simple video games, we extend their applicability to domains with continuous
state and action spaces. Our experiments show that such agents are quickly
able to learn an approximate model of their environment, which suffices to
solve several simple motion-control tasks. Comparisons with deep reinforcement
learning further highlight the sample efficiency of forward model learning agents.
BibTeX:
@article{Dockhorn2020COG,
author = {Dockhorn, Alexander and Lucas, Simon},
doi = {10.1109/CoG47356.2020.9231793},
isbn = {9781728145334},
issn = {23254289},
journal = {IEEE Conference on Computational Intelligence and Games, CIG},
keywords = {GVGAI framework,General Game Learning,Local Forward Model,Rolling Horizon Evolutionary Algorithm},
pages = {716--723},
title = {{Local Forward Model Learning for GVGAI Games}},
volume = {2020-August},
year = {2020}
}
Alexander Dockhorn and Rudolf Kruse.
Predicting Cards Using a Fuzzy Multiset Clustering of Decks.
In: International Journal of Computational Intelligence Systems (IJCIS),
2020.
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Search-based agents have been shown to perform well in many game-based applications.
In the context of partially observable scenarios, agents require the state to be
fully determinized. Especially in the case of collectible card games, the sheer
number of decks constructed by players hinders an agent’s ability to reliably estimate the
game’s current state and, therefore, renders the search ineffective. In this
paper, we propose the use of a (fuzzy) multiset representation to describe
frequently played decks. Extracted deck prototypes have been shown to match human
expert labels well and seem to serve as an efficient abstraction of the deck
space. We further show that such deck prototypes allow the agent to predict
upcoming cards with high accuracy, thereby allowing more accurate sampling
procedures for search-based agents.
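A condensed sketch of the idea (card names and the similarity measure are invented for illustration; the paper develops this properly with fuzzy multisets): decks are treated as multisets of card counts, a cluster prototype is the per-card average count, and the unseen cards of an opponent are predicted from the closest prototype.

```python
# Hedged sketch: decks as multisets of card counts, prototypes as per-card
# mean counts, and a prediction of unseen cards from the closest prototype.
from collections import Counter

decks = [
    Counter({"Fireball": 2, "Frostbolt": 2, "Water Elemental": 2}),
    Counter({"Fireball": 2, "Frostbolt": 1, "Arcane Intellect": 2}),
    Counter({"Swipe": 2, "Wild Growth": 2, "Ironbark Protector": 2}),
]

def prototype(cluster):
    """Per-card average copy count over the decks of a cluster."""
    cards = set().union(*cluster)
    return {c: sum(d[c] for d in cluster) / len(cluster) for c in cards}

def similarity(observed, proto):
    """Overlap between observed card counts and the prototype."""
    return sum(min(observed[c], proto.get(c, 0.0)) for c in observed)

mage_proto = prototype(decks[:2])
druid_proto = prototype(decks[2:])

observed = Counter({"Fireball": 1, "Frostbolt": 1})   # cards seen so far
best = max([mage_proto, druid_proto], key=lambda p: similarity(observed, p))
# Predict the most expected unseen cards from the matching prototype.
unseen = sorted((c for c in best if c not in observed), key=lambda c: -best[c])
print(unseen[:2])
```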
BibTeX:
@article{DockhornIJCIS2020,
author = {Dockhorn, Alexander and Kruse, Rudolf},
doi = {10.2991/ijcis.d.200805.001},
issn = {18756883},
journal = {International Journal of Computational Intelligence Systems},
keywords = {Clustering, Deck analysis, Fuzzy multisets, Hearthstone},
number = {1},
pages = {1207--1217},
title = {{Predicting cards using a fuzzy multiset clustering of decks}},
volume = {13},
year = {2020}
}
Daan Apeldoorn and Alexander Dockhorn.
Exception-Tolerant Hierarchical Knowledge Bases for Forward Model Learning.
In: IEEE Transactions on Games (TOG),
2020.
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
This paper provides an overview of the recently proposed forward model approximation framework for learning games of the general video game artificial intelligence (GVGAI) framework. In contrast to other general game-playing algorithms, the proposed agent model does not need a full description of the game but can learn the game’s rules by observing game state transitions. Based on hierarchical knowledge bases, the forward model can be learned and revised during game-play, improving the accuracy of the agent’s state predictions over time. This allows the application of simulation-based search algorithms and belief revision techniques to previously unknown settings. We show that the proposed framework is able to quickly learn a model for dynamic environments in the context of the GVGAI framework.
BibTeX:
@article{Apeldoorn2020,
author = {Apeldoorn, Daan and Dockhorn, Alexander},
doi = {10.1109/TG.2020.3008002},
issn = {2475-1502},
journal = {IEEE Transactions on Games},
pages = {1--1},
title = {{Exception-Tolerant Hierarchical Knowledge Bases for Forward Model Learning}},
url = {https://ieeexplore.ieee.org/document/9136897/},
year = {2020}
}
Alexander Dockhorn.
Dissertation: Prediction-based Search for Autonomous Game-Playing.
In: Otto von Guericke University Magdeburg,
2020.
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
Simulation-based search algorithms have been widely applied in the context of autonomous game-playing.
Their flexibility allows for the rapid development of agents that are able to achieve satisfying
performance in many problem domains. However, these algorithms share two requirements, namely,
access to a forward model and full knowledge of the environment’s state. In this thesis, simulation-based
search algorithms will be adapted to tasks in which either the forward model or the state of the
environment is unknown.
To play a game without a forward model, methods for learning the environment’s model from recent
interactions between the agent and the environment are proposed. These forward model learning
techniques allow the agent to predict the outcome of its actions, and therefore, enable a
prediction-based search process. An analysis of environment models shows how they can be
represented and learned in the form of an end-to-end forward model. Based on this general approach,
three methods are proposed which reduce the number of possible models and, thus, the training time
required. The proposed forward model learning techniques are evaluated according to their applicability
to general game-learning tasks and validated based on a wide variety of games. The results show the applicability
of prediction-based search agents for games where the forward model is not accessible.
In case the environment’s state cannot be fully observed by the agent and the number of possible states
is low, state determinisation methods, which uniformly sample possible states, have been shown to perform well.
However, if the number of states is high, the uniform state sampling approach performs worse than non-determinising
search methods due to the search process spending too much time on unlikely states. In this thesis, two methods
for predictive state determinisation are proposed. These sample probable states based on the agent’s partial
observation of the current state and a database of previously played games, which allows the agent to focus
its search process on likely states. Proposed algorithms are evaluated in terms of their prediction accuracy
and game-playing performance in the context of the collectible card game Hearthstone. Results show that the
implemented agent outperforms other state-of-the-art agents when the replay database is representative
of the state distribution.
BibTeX:
@phdthesis{Dockhorn2020Thesis,
author = {Dockhorn, Alexander},
pages = {1--231},
school = {Otto von Guericke University Magdeburg},
title = {{Prediction-based Search for Autonomous Game-Playing}},
year = {2020}
}
2019
Simon Mark Lucas, Alexander Dockhorn, Vanessa Volz, Chris Bamford, Raluca Gaina, Ivan Bravi, Diego Perez-Liebana, Sanaz Mostaghim, and Rudolf Kruse.
A Local Approach to Forward Model Learning: Results on the Game of Life Game.
In: Proceedings of the 2019 IEEE Conference on Games (CoG), 1-8. https://doi.org/10.1109/CIG.2019.8848002
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
This paper investigates the effect of learning a forward model on the performance
of a statistical forward planning agent. We transform Conway’s Game of Life simulation
into a single-player game where the objective can be either to preserve as much life
as possible or to extinguish all life as quickly as possible. In order to learn the
forward model of the game, we formulate the problem in a novel way that learns the
local cell transition function by creating a set of supervised training data and
predicting the next state of each cell in the grid based on its current state and
immediate neighbours. Using this method we are able to harvest sufficient data to
learn perfect forward models by observing only a few complete state transitions,
using either a look-up table, a decision tree, or a neural network. In contrast,
learning the complete state transition function is a much harder task and our initial
efforts to do this using deep convolutional auto-encoders were less successful. We also
investigate the effects of imperfect learned models on prediction errors and game-playing
performance, and show that even models with significant errors can provide good performance.
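The local-model formulation translates directly into a small script; the following sketch (a simplified stand-in using zero-padded borders, not the paper's exact setup) harvests (neighbourhood, next cell) pairs from a few observed transitions into a look-up table and then predicts an unseen grid cell by cell.

```python
# Sketch of a local forward model learned as a look-up table: each observed
# 3x3 neighbourhood maps to the centre cell's next state. Conway's rule is
# used here only to generate the observed transitions.
import numpy as np

rng = np.random.default_rng(1)

def life_step(grid):
    """Non-toroidal Game of Life step (zero-padded borders)."""
    p = np.pad(grid, 1)
    h, w = grid.shape
    neighbours = sum(p[1 + dy:1 + dy + h, 1 + dx:1 + dx + w]
                     for dy in (-1, 0, 1) for dx in (-1, 0, 1)) - grid
    return ((neighbours == 3) | ((grid == 1) & (neighbours == 2))).astype(int)

def neighbourhoods(grid):
    """Yield ((y, x), flattened 3x3 patch) for every cell."""
    p = np.pad(grid, 1)
    for y in range(grid.shape[0]):
        for x in range(grid.shape[1]):
            yield (y, x), tuple(p[y:y + 3, x:x + 3].flatten())

# Harvest training pairs from a handful of observed transitions.
table = {}
grid = rng.integers(0, 2, (16, 16))
for _ in range(5):
    nxt = life_step(grid)
    for (y, x), patch in neighbourhoods(grid):
        table[patch] = nxt[y, x]
    grid = nxt

# Predict an unseen grid with the learned table (unseen patches default to 0).
test = rng.integers(0, 2, (16, 16))
pred = np.zeros_like(test)
for (y, x), patch in neighbourhoods(test):
    pred[y, x] = table.get(patch, 0)
print("cell prediction accuracy:", (pred == life_step(test)).mean())
```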
BibTeX:
@inproceedings{Lucas2019,
archivePrefix = {arXiv},
arxivId = {1903.12508v1},
author = {Lucas, Simon M and Dockhorn, Alexander and Volz, Vanessa and Bamford, Chris and Gaina, Raluca D and Bravi, Ivan and Perez-Liebana, Diego and Mostaghim, Sanaz and Kruse, Rudolf},
booktitle = {2019 IEEE Conference on Games (CoG)},
doi = {10.1109/CIG.2019.8848002},
eprint = {1903.12508v1},
isbn = {978-1-7281-1884-0},
month = {aug},
pages = {1--8},
publisher = {IEEE},
title = {{A Local Approach to Forward Model Learning: Results on the Game of Life Game}},
url = {https://ieeexplore.ieee.org/document/8848002/},
year = {2019}
}
Alexander Dockhorn, Simon Mark Lucas, Vanessa Volz, Chris Bamford, Ivan Bravi, Raluca Gaina, and Diego Perez-Liebana.
Learning Local Forward Models on Unforgiving Games.
In: Proceedings of the 2019 IEEE Conference on Games (CoG), 1-4. https://doi.org/10.1109/CIG.2019.8848044
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
This paper examines learning approaches for forward models based on local cell transition functions.
We provide a formal definition of local forward models for which we propose two basic learning approaches.
Our analysis is based on the game Sokoban, where a wrong action can lead to an unsolvable game state.
Therefore, an accurate prediction of an action’s resulting state is necessary to avoid this scenario.
In contrast to learning the complete state transition function, local forward models allow extracting
multiple training examples from a single state transition. In this way, the Hash Set model, as well
as the Decision Tree model, quickly learn to predict upcoming state transitions of both the training
and the test set. Applying the model using a statistical forward planner showed that the best models
can be used to a satisfying degree even in cases in which the test levels have not yet been seen.
Our evaluation includes an analysis of various local neighbourhood patterns and sizes to test the
learners’ capabilities in case too few or too many attributes are extracted, of which the latter has
been shown to degrade the performance of the model learner.
BibTeX:
@inproceedings{Dockhorn2019COG,
address = {London},
archivePrefix = {arXiv},
arxivId = {1909.00442},
author = {Dockhorn, Alexander and Lucas, Simon M and Volz, Vanessa and Bravi, Ivan and Gaina, Raluca D and Perez-Liebana, Diego},
booktitle = {2019 IEEE Conference on Games (CoG)},
doi = {10.1109/CIG.2019.8848044},
eprint = {1909.00442},
isbn = {978-1-7281-1884-0},
month = {aug},
pages = {1--4},
publisher = {IEEE},
title = {{Learning Local Forward Models on Unforgiving Games}},
url = {https://ieeexplore.ieee.org/document/8848044/},
year = {2019}
}
Alexander Dockhorn and Sanaz Mostaghim.
Introducing the Hearthstone-AI Competition.
2019. pages 1–4. http://arxiv.org/abs/1906.04238
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
The Hearthstone AI framework and competition motivates the development of artificial intelligence agents that can play collectible card games. A special feature of those games is the high variety of cards, which can be chosen by the players to create their own decks. In contrast to simpler card games, the value of many cards is determined by their possible synergies. The vast amount of possible decks, the randomness of the game, as well as the restricted information during the player's turn offer quite a hard challenge for the development of game-playing agents. This short paper introduces the competition framework and goes into more detail on the problems and challenges that need to be faced during the development process.
BibTeX:
@article{Dockhorn2019Competition,
archivePrefix = {arXiv},
arxivId = {1906.04238},
author = {Dockhorn, Alexander and Mostaghim, Sanaz},
eprint = {1906.04238},
month = {may},
pages = {1--4},
title = {{Introducing the Hearthstone-AI Competition}},
url = {http://arxiv.org/abs/1906.04238},
year = {2019}
}
Alexander Dockhorn, Tony Schwensfeier, and Rudolf Kruse.
Fuzzy Multiset Clustering for Metagame Analysis.
In: Proceedings of the 2019 Conference of the International Fuzzy Systems Association
and the European Society for Fuzzy Logic and Technology (EUSFLAT 2019), 536–543. https://doi.org/10.2991/eusflat-19.2019.74
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
Developing agents for automated game playing is a demanding task in the general
game production cycle. Especially the involvement of frequent balance changes after
the release, e.g. as they often occur in collectible card games, requires constant
updates of the developed agent. The game’s developers need to constantly analyze
and understand the current meta-game for adjusting the agent’s parameters,
making balance changes to the game and ultimately sustaining the satisfaction
of its player base. The underlying analysis largely depends on evaluating players’
playtraces. Necessary adjustments to the agent’s and the game’s parameters are taken
care of by the game’s developers. This paper proposes a first step in automatically
observing the current state of a collectible card game, which will assist the developers
in their understanding of established deck archetypes and, therefore, speed up the
update cycle. Fuzzy multisets are used for modeling decks and frequently occurring
subsets of cards. We propose the definition of a (fuzzy) multiset centroid to uniquely
represent the cluster and its contained decks and show that it better matches the deck
archetype than the often reported deck core. The proposed clustering procedure identifies
deck archetypes and keeps track of their common variants in the current meta-game.
We evaluate the approach by comparing the result of our clustering procedure with a
hand-labeled data set and show that it is able to reproduce clusters of similar
quality to a labeling provided by experts.
BibTeX:
@inproceedings{Dockhorn2019/08,
title={Fuzzy Multiset Clustering for Metagame Analysis},
author={Alexander Dockhorn and Tony Schwensfeier and Rudolf Kruse},
year={2019},
booktitle={Proceedings of the 11th Conference of the European Society for Fuzzy Logic and Technology (EUSFLAT 2019)},
pages={536-543},
issn={2589-6644},
isbn={978-94-6252-770-6},
url={https://doi.org/10.2991/eusflat-19.2019.74},
doi={10.2991/eusflat-19.2019.74},
publisher={Atlantis Press}
}
2018
Alexander Dockhorn, Tim Tippelt, and Rudolf Kruse.
Model Decomposition for Forward Model Approximation.
In: 2018 IEEE Symposium Series on Computational Intelligence (SSCI), 1751–1757. https://doi.org/10.1109/SSCI.2018.8628624
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
In this paper we propose a model decomposition architecture, which advances on
our previous attempts of learning an approximated forward model for unknown games.
The developed model architecture is based on design constraints of the General Video Game
Artificial Intelligence Competition and the Video Game Definition Language.
Our agent first builds up a database of interactions with the game environment for
each distinct component of a game. We further train a decision tree model for each
of those independent components. For predicting a future state we query each model
individually and aggregate the result. The developed model ensemble does not just
predict known states with a high accuracy, but also adapts very well to previously
unseen levels or situations. Future work will show how well the increased accuracy
helps in playing an unknown game using simulation-based search algorithms such as
Monte Carlo Tree Search.
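For readers unfamiliar with the decomposition idea, a deliberately small sketch (the toy environment, its features, and the use of scikit-learn decision trees are assumptions for illustration): one classifier is trained per state component, and the next state is assembled from the individual predictions.

```python
# Sketch of model decomposition: one decision tree per state component,
# each queried independently, with the results aggregated into the
# predicted next state. The environment is a toy stand-in.
from sklearn.tree import DecisionTreeClassifier

# Toy state: (avatar_x, has_key); actions: 0 = left, 1 = right, 2 = grab.
def step(state, action):
    x, key = state
    if action == 0:
        x = max(0, x - 1)
    elif action == 1:
        x = min(9, x + 1)
    elif action == 2 and x == 5:
        key = 1
    return (x, key)

# Enumerate observed transitions (a real agent would collect these by playing).
X, next_x, next_key = [], [], []
for x in range(10):
    for key in (0, 1):
        for a in range(3):
            nx, nkey = step((x, key), a)
            X.append([x, key, a])
            next_x.append(nx)
            next_key.append(nkey)

# One model per component of the next state.
models = {"x": DecisionTreeClassifier().fit(X, next_x),
          "key": DecisionTreeClassifier().fit(X, next_key)}

def predict(state, action):
    features = [[*state, action]]
    return tuple(int(m.predict(features)[0]) for m in models.values())

print(predict((5, 0), 2))   # (5, 1): grabbing the key at x == 5
print(predict((3, 1), 1))   # (4, 1): moving right keeps the key
```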
BibTeX:
@inproceedings{Dockhorn2018,
author = {Dockhorn, Alexander and Tippelt, Tim and Kruse, Rudolf},
booktitle = {2018 IEEE Symposium Series on Computational Intelligence (SSCI)},
doi = {10.1109/SSCI.2018.8628624},
isbn = {978-1-5386-9276-9},
month = {nov},
pages = {1751--1757},
publisher = {IEEE},
title = {{Model Decomposition for Forward Model Approximation}},
url = {https://ieeexplore.ieee.org/document/8628624/},
year = {2018}
}
Alexander Dockhorn and Rudolf Kruse.
Detecting Sensor Dependencies for Building Complementary Model Ensembles.
In: Proceedings of the 28. Workshop Computational Intelligence, Dortmund, 29.-30. November 2018, 217–234.
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
In this paper we propose a model decomposition architecture, which advances on
our previous attempts of learning an approximated forward model for unknown games.
The developed model architecture is based on design constraints of the General Video Game
Artificial Intelligence Competition and the Video Game Definition Language.
Our agent first builds up a database of interactions with the game environment for
each distinct component of a game. We further train a decision tree model for each
of those independent components. For predicting a future state we query each model
individually and aggregate the result. The developed model ensemble does not just
predict known states with a high accuracy, but also adapts very well to previously
unseen levels or situations. Future work will show how well the increased accuracy
helps in playing an unknown game using simulation-based search algorithms such as
Monte Carlo Tree Search.
BibTeX:
@inproceedings{DockhornCI2018,
author = {Dockhorn, Alexander and Kruse, Rudolf},
booktitle = {Proceedings of the 28. Workshop Computational Intelligence, Dortmund, 29.-30. November 2018},
pages = {217--234},
title = {{Detecting Sensor Dependencies for Building Complementary Model Ensembles}},
year = {2018}
}
Alexander Dockhorn and Daan Apeldoorn.
Forward Model Approximation for General Video Game Learning.
In: Proceedings of the 2018 IEEE Conference on Computational Intelligence and Games (CIG’18). 425–432. https://doi.org/10.1109/CIG.2018.8490411
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
This paper proposes a novel learning agent model for a General Video Game Playing agent.
Our agent learns an approximation of the forward model from repeatedly playing a game
and subsequently adapting its behavior to previously unseen levels. To achieve this,
it first learns the game mechanics through machine learning techniques and then extracts
rulebased symbolic knowledge on different levels of abstraction. When being confronted
with new levels of a game, the agent is able to revise its knowledge by a novel belief
revision approach. Using methods such as Monte Carlo Tree Search and Breadth First Search,
it searches for the best possible action using simulated game episodes. Those simulations
are only possible due to reasoning about future states using the extracted rule-based knowledge
from random episodes during the learning phase. The developed agent outperforms previous
agents by a large margin, while still being limited in its prediction capabilities.
The proposed forward model approximation opens a new class of solutions in the context
of General Video Game Playing, which do not try to learn a value function, but try to
increase their accuracy in modelling the game.
BibTeX:
@inproceedings{DockhornApeldoorn,
author = {Dockhorn, Alexander and Apeldoorn, Daan},
booktitle = {Proceedings of the 2018 IEEE Conference on Computational Intelligence and Games (CIG'18)},
doi = {10.1109/CIG.2018.8490411},
isbn = {9781538643594},
month = {aug},
pages = {425--432},
publisher = {IEEE},
title = {{Forward Model Approximation for General Video Game Learning}},
url = {https://ieeexplore.ieee.org/document/8490411/},
year = {2018}
}
Alexander Dockhorn, Max Frick, Ünal Akkaya and Rudolf Kruse.
Predicting Opponent Moves for Improving Hearthstone AI.
In: J. Medina, M. Ojeda-Aciego, J. L. Verdegay, D. A. Pelta, I. P. Cabrera, B. Bouchon-Meunier, & R. R. Yager (Eds.), 17th International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems, IPMU 2018.
621–632. Springer International Publishing. https://doi.org/10.1007/978-3-319-91476-3_51
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
Games pose many interesting questions for the development of artificial intelligence agents.
Especially popular are methods that guide the decision-making process of an autonomous agent,
which is tasked to play a certain game. In previous studies, the heuristic search method Monte
Carlo Tree Search (MCTS) was successfully applied to a wide range of games. Results showed that
this method can often reach playing capabilities on par with humans or even better. However,
the characteristics of collectible card games such as the online game Hearthstone make it
infeasible to apply MCTS directly. Uncertainty in the opponent’s hand cards, the card draw,
and random card effects considerably restrict the simulation depth of MCTS. We show that
knowledge gathered from a database of human replays help to overcome this problem by predicting
multiple card distributions. Those predictions can be used to increase the simulation depth of
MCTS. For this purpose, we calculate bigram-rates of frequently co-occurring cards to predict
multiple sets of hand cards for our opponent. Those predictions can be used to create an ensemble
of MCTS agents, which work under the assumption of differing card distributions and perform
simulations according to their assigned distribution. The proposed ensemble approach outperforms
other agents on the game Hearthstone, including various types of MCTS. Our case study shows that
uncertainty can be handled effectively using predictions of sufficient accuracy, ultimately,
improving the MCTS guided decision-making process. The resulting decision-making based on such
an MCTS ensemble proved to be less prone to errors by uncertainty and opens up a new class of MCTS algorithms.
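A compressed sketch of the bigram idea (the replay decks and card names are invented; the real system works on a large replay database and feeds the predictions into an MCTS ensemble): co-occurrence rates estimated from replays rank which unseen cards an opponent most likely holds, given the cards already observed.

```python
# Hedged sketch: bigram co-occurrence rates of cards over a replay
# database, used to rank likely unseen cards in the opponent's hand.
from collections import Counter
from itertools import combinations

replay_decks = [
    {"Fireball", "Frostbolt", "Water Elemental", "Arcane Intellect"},
    {"Fireball", "Frostbolt", "Sorcerer's Apprentice"},
    {"Swipe", "Wild Growth", "Ironbark Protector"},
]

pair_counts = Counter()
card_counts = Counter()
for deck in replay_decks:
    card_counts.update(deck)
    pair_counts.update(frozenset(p) for p in combinations(sorted(deck), 2))

def bigram_rate(a, b):
    """P(b in deck | a in deck), estimated from the replays."""
    return pair_counts[frozenset((a, b))] / card_counts[a]

def rank_candidates(observed, candidates):
    scores = {c: sum(bigram_rate(o, c) for o in observed) for c in candidates}
    return sorted(scores, key=scores.get, reverse=True)

observed = {"Fireball", "Frostbolt"}
candidates = {"Water Elemental", "Swipe", "Sorcerer's Apprentice"}
print(rank_candidates(observed, candidates))   # "Swipe" ranks last
```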
BibTeX:
@inproceedings{Dockhorn2018Hearthstone,
author = {Dockhorn, Alexander and Frick, Max and Akkaya, {\"{U}}nal and Kruse, Rudolf},
booktitle = {17th International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems, IPMU 2018},
doi = {10.1007/978-3-319-91476-3_51},
editor = {Medina, Jes{\'u}s and Ojeda-Aciego, Manuel and Verdegay, Jos{\'e} Luis and Pelta, David A. and Cabrera, Inma P. and Bouchon-Meunier, Bernadette and Yager, Ronald R.},
pages = {621--632},
publisher = {Springer International Publishing},
title = {{Predicting Opponent Moves for Improving Hearthstone AI}},
url = {http://link.springer.com/10.1007/978-3-319-91476-3_51},
year = {2018}
}
2017
Alexander Dockhorn, Christoph Doell, Matthias Hewelt and Rudolf Kruse.
A decision heuristic for Monte Carlo tree search doppelkopf agents.
In: 2017 IEEE Symposium Series on Computational Intelligence (SSCI).
1–8. https://doi.org/10.1109/SSCI.2017.8285181
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
This work builds on previous research by Sievers and Helmert, who developed a Monte Carlo
Tree Search based doppelkopf agent. This four player card game features a larger state
space than skat due to the unknown cards of the contestants. Additionally, players face
the unique problem of not knowing their teammates at the start of the game. Figuring out
the player parties is a key feature of this card game and demands differing play styles
depending on the current knowledge of the game state. In this work we enhance the Monte
Carlo Tree Search agent created by Sievers and Helmert with a decision heuristic.
Our goal is to improve the quality of playouts, by suggesting high quality moves
and predicting enemy moves based on a neural network classifier. This classifier is
trained on an extensive history of expert player moves recorded during official doppelkopf
tournaments. Different network architectures are discussed and evaluated based on their
prediction accuracy. The best performing network was tested in a direct comparison with
the previous Monte Carlo Tree Search agent by Sievers and Helmert. We show that high quality
predictions increase the quality of playouts. Overall, our simulations show that adding the
decision heuristic increased the strength of play under comparable computational effort.
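A minimal sketch of how such a decision heuristic plugs into the playout phase (the probability table below is a stand-in for the trained neural network, and the move names are invented): instead of choosing playout moves uniformly at random, moves are sampled in proportion to the predicted probabilities.

```python
# Hedged sketch: biasing MCTS playout moves with a move-prediction model.
# A toy probability table stands in for the trained neural network.
import random

def predicted_move_probs(legal_moves):
    # Stand-in for the classifier: prefer trumps, dislike discarding aces.
    weights = {"play_trump": 5.0, "follow_suit": 2.0, "discard_ace": 0.2}
    w = [weights.get(m, 1.0) for m in legal_moves]
    total = sum(w)
    return [x / total for x in w]

def heuristic_playout_move(legal_moves, rng=random):
    # Sample proportionally to predicted probabilities instead of uniformly.
    return rng.choices(legal_moves, weights=predicted_move_probs(legal_moves))[0]

moves = ["play_trump", "follow_suit", "discard_ace"]
sample = [heuristic_playout_move(moves) for _ in range(1000)]
print({m: sample.count(m) for m in moves})
```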
BibTeX:
@inproceedings{Dockhorn2017,
author = {Dockhorn, Alexander and Doell, Christoph and Hewelt, Matthias and Kruse, Rudolf},
booktitle = {2017 IEEE Symposium Series on Computational Intelligence (SSCI)},
doi = {10.1109/SSCI.2017.8285181},
isbn = {978-1-5386-2726-6},
month = {nov},
pages = {1--8},
publisher = {IEEE},
title = {{A decision heuristic for Monte Carlo tree search doppelkopf agents}},
url = {http://ieeexplore.ieee.org/document/8285181/},
year = {2017}
}
Tim Sabsch, Christian Braune, Alexander Dockhorn, and Rudolf Kruse.
Using a Multiobjective Genetic Algorithm for Curve Approximation.
In: 2017 IEEE Symposium Series on Computational Intelligence (SSCI).
1–8. https://doi.org/10.1109/SSCI.2017.8285179
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
Fitting a parametric curve to unordered point cloud data is a frequently
encountered problem in areas where raster data has to be vectorized,
or advanced geometric descriptors of point clouds are to be found. Existing
approaches often struggle with certain geometric properties, such as varying
density, self-intersections, sharp corners, or are only designed to handle
low-dimensional and discrete data. With the purpose to overcome these difficulties,
the applicability of evolutionary algorithms to the topic of curve approximation is
studied in this work. Based on the popular algorithm NSGA-II, an implementation has
been developed that uses the distance to the point cloud and the number of control
points of a curve as objective functions. The evaluation reveals that the proposed
objective functions control the evolutionary process well, and the final curves fit
most of the evaluated data sets correctly. The results of the study indicate the
usefulness of genetic algorithms for the topic of curve fitting and form a basis
for future research in this area.
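To make the two objectives concrete, a short numpy sketch (no NSGA-II run included; candidate curves are plain polylines here rather than the parametric curves evolved in the paper): one objective measures the mean distance from the point cloud to the candidate, the other counts its control points.

```python
# Hedged sketch of the two objectives only: distance of a point cloud to a
# candidate polyline, and the polyline's number of control points.
import numpy as np

def point_to_segment(p, a, b):
    ab, ap = b - a, p - a
    t = np.clip(np.dot(ap, ab) / (np.dot(ab, ab) + 1e-12), 0.0, 1.0)
    return np.linalg.norm(p - (a + t * ab))

def objectives(control_points, cloud):
    """Objective 1: mean distance of cloud points to the polyline.
    Objective 2: model complexity, i.e. the number of control points."""
    dist = np.mean([min(point_to_segment(p, control_points[i], control_points[i + 1])
                        for i in range(len(control_points) - 1))
                    for p in cloud])
    return dist, len(control_points)

rng = np.random.default_rng(0)
cloud = np.column_stack([np.linspace(0, 1, 50),
                         np.linspace(0, 1, 50) ** 2]) + rng.normal(0, 0.01, (50, 2))
coarse = np.array([[0, 0], [1, 1]], dtype=float)
fine = np.array([[0, 0], [0.5, 0.25], [1, 1]], dtype=float)
print(objectives(coarse, cloud))   # fewer points, larger distance
print(objectives(fine, cloud))     # more points, smaller distance
```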
BibTeX:
@inproceedings{Sabsch2017,
author = {Sabsch, Tim and Braune, Christian and Dockhorn, Alexander and Kruse, Rudolf},
booktitle = {2017 IEEE Symposium Series on Computational Intelligence, SSCI 2017 - Proceedings},
doi = {10.1109/SSCI.2017.8285179},
isbn = {9781538627259},
pages = {1--6},
publisher = {IEEE},
title = {{Using a multiobjective genetic algorithm for curve approximation}},
volume = {2018-January},
year = {2018}
}
Alexander Dockhorn and Rudolf Kruse.
Combining cooperative and adversarial coevolution in the context of pac-man.
In: 2017 IEEE Conference on Computational Intelligence and Games (CIG).
60–67. https://doi.org/10.1109/CIG.2017.8080416
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
In this paper we discuss our recent approach for evolving a diverse set of agents
for both the Pac-Man and the Ghost Team track of the current Ms. Pac-Man vs. Ghost Team
competition. We used genetic programming for generating various agents, which were
distributed in multiple populations. The optimization includes cooperative and adversarial
subtasks, such that Pac-Man is constantly competing against the Ghost Team, whereas the
Ghost Team is formed of four cooperatively evolving populations. For the generation of a
Ghost Team and calculation of the associated fitness we took one individual from each population.
This strict separation preserves the evolution pressure for each population such that respective
Ghost Teams compete against each other in developing an efficient cooperation in catching
Pac-Man. This approach not only is useful for developing a versatile set of playing agents,
but also for adapting the team to the current behavior of the competing populations.
Ultimately, we aim for optimizing both tasks in parallel.
BibTeX:
@inproceedings{Dockhorn2017a,
author = {Dockhorn, Alexander and Kruse, Rudolf},
booktitle = {2017 IEEE Conference on Computational Intelligence and Games, CIG 2017},
doi = {10.1109/CIG.2017.8080416},
isbn = {9781538632338},
pages = {60--67},
title = {{Combining cooperative and adversarial coevolution in the context of pac-man}},
year = {2017}
}
2016
Alexander Dockhorn, Christian Braune, and Rudolf Kruse.
Variable density based clustering.
In: 2016 IEEE Symposium Series on Computational Intelligence (SSCI).
1–8. https://doi.org/10.1109/SSCI.2016.7849925
[Abstract]
[BibTeX]
[URL]
[PDF]
[Slides]
Abstract:
The class of density-based clustering algorithms excels in detecting clusters of arbitrary
shape. DBSCAN, the most common representative, has been demonstrated to be useful in a lot
of applications. Still the algorithm suffers from two drawbacks, namely a non-trivial
parameter estimation for a given dataset and the limitation to data sets with constant
cluster density. The first was already addressed in our previous work, where we presented
two hierarchical implementations of DBSCAN. In combination with a simple optimization procedure,
those proofed to be useful in detecting appropriate parameter estimates based on an objective
function. However, our algorithm was not capable of producing clusters of differing density.
In this work we will use the hierarchical information to extract variable density clusters and
nested cluster structures. Our evaluation shows that the clustering approach based on
edge-lengths of the dendrogram or based on area estimates successfully detects clusters
of arbitrary shape and density.
BibTeX:
@inproceedings{Dockhorn2016,
author = {Dockhorn, Alexander and Braune, Christian and Kruse, Rudolf},
booktitle = {2016 IEEE Symposium Series on Computational Intelligence (SSCI)},
doi = {10.1109/SSCI.2016.7849925},
isbn = {978-1-5090-4240-1},
month = {dec},
pages = {1--8},
publisher = {IEEE},
title = {{Variable density based clustering}},
url = {http://ieeexplore.ieee.org/document/7849925/},
year = {2016}
}
2015
Alexander Dockhorn.
Master Thesis: Hierarchical Extensions and Cluster Validation Techniques for DBSCAN.
At: Otto von Guericke University Magdeburg, 2015.
pp. 1-80.
[Abstract]
[BibTeX]
[PDF]
Abstract:
DBSCAN is one of the most commonly used density-based clustering algorithms.
While it performs well in various clustering scenarios, finding appropriate parameters
for the algorithm is a non-trivial task. Multiple works proposed parameter estimation
or elimination techniques. However, most of the resulting algorithms suffer from usability
issues and the incapability of finding clusters of different densities. In this work
we propose an alternating optimization algorithm to find locally optimal parameter
combinations for DBSCAN. It combines two hierarchical versions of DBSCAN, which are
generated by fixing one parameter and iterating through the parameter space of the
other. Due to the monotonicity of the parameter space, successive hierarchy levels
can efficiently be computed. Hierarchies generated this way can be analyzed to find
an appropriate estimate for the free parameter or finding clusters with different
densities by the use of non-horizontal cuts. An internal validation criterion is
used to find an appropriate horizontal cut. Throughout this work we compare multiple
internal validation measures and propose a density-based interpretation of the
silhouette coefficient. Our results show that the proposed density-based silhouette
coefficient adapts well to non-convex clusters produced by DBSCAN.
Also, the alternating optimization approach automatically detects a good parameter
combination in a variety of clustering scenarios. Additionally, non-hierarchical cuts
performed especially well in the detection of clusters with different densities.
BibTeX:
@phdthesis{Dockhorn2015a,
author = {Dockhorn, Alexander},
pages = {101},
school = {Otto von Guericke University of Magdeburg},
title = {{Hierarchical Extensions and Cluster Validation Techniques for DBSCAN}},
type = {Master Thesis},
year = {2015}
}
Alexander Dockhorn, Christian Braune, and Rudolf Kruse.
An Alternating Optimization Approach based on Hierarchical Adaptations of DBSCAN.
In: 2015 IEEE Symposium Series on Computational Intelligence (SSCI).
2, 749–755. https://doi.org/10.1109/SSCI.2015.113
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
DBSCAN is one of the most common density-based clustering algorithms. While
multiple works tried to present an appropriate estimate for the needed parameters,
we propose an alternating optimization algorithm, which finds a locally optimal
parameter combination. The algorithm is based on the combination of two
hierarchical versions of DBSCAN, which can be generated by fixing one parameter
and iterating through possible values of the second parameter. Due to monotonicity
of the neighborhood sets and the core-condition, successive levels of the hierarchy
can efficiently be computed. A locally optimal parameter combination can be
determined using internal cluster validation measures. In this work we are comparing
the measures edge-correlation and silhouette coefficient. For the latter we propose
a density-based interpretation and show a respective computationally efficient estimate
to detect non-convex clusters produced by DBSCAN. Our results show that the
algorithm can automatically detect a good DBSCAN clustering on a variety of cluster
scenarios.
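As a simplified stand-in for the alternating optimization described above (this sketch only sweeps eps with min_samples fixed and uses scikit-learn's standard silhouette score rather than the density-based variant proposed in the paper):

```python
# Simplified stand-in: fix min_samples, sweep eps, and keep the DBSCAN
# result with the best silhouette score (noise points are excluded).
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6, random_state=0)

best_eps, best_score, best_labels = None, -1.0, None
for eps in np.linspace(0.1, 2.0, 20):
    labels = DBSCAN(eps=eps, min_samples=5).fit_predict(X)
    mask = labels != -1                      # drop noise points
    if len(set(labels[mask])) < 2:
        continue                             # silhouette needs >= 2 clusters
    score = silhouette_score(X[mask], labels[mask])
    if score > best_score:
        best_eps, best_score, best_labels = eps, score, labels

print(f"best eps: {best_eps:.2f}, silhouette: {best_score:.2f}, "
      f"clusters: {len(set(best_labels)) - (1 if -1 in best_labels else 0)}")
```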
BibTeX:
@inproceedings{Dockhorn2015,
author = {Dockhorn, Alexander and Braune, Christian and Kruse, Rudolf},
booktitle = {2015 IEEE Symposium Series on Computational Intelligence (SSCI)},
doi = {10.1109/SSCI.2015.113},
isbn = {9781479975600},
number = {2},
pages = {749--755},
title = {{An Alternating Optimization Approach based on Hierarchical Adaptations of DBSCAN}},
year = {2015}
}
Pascal Held, Alexander Dockhorn, and Rudolf Kruse.
On Merging and Dividing Social Graphs.
In: Journal of Artificial Intelligence and Soft Computing Research.
5(1), 23–49. https://doi.org/10.1515/jaiscr-2015-0017
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Modeling social interaction can be based on graphs. However most models lack
the flexibility of including larger changes over time. The Barabási-Albert-model
is a generative model which already offers mechanisms for adding nodes. We will
extend this by presenting four methods for merging and five for dividing graphs
based on the Barabási-Albert-model. Our algorithms were motivated by different
real world scenarios and focus on preserving graph properties derived from these
scenarios. With little alterations in the parameter estimation those algorithms
can be used for other graph models as well. All algorithms were tested in multiple
experiments using graphs based on the Barabási-Albert-model, an extended
version of the Barabási-Albert-model by Holme and Kim, the Watts-Strogatz-model
and the Erdős-Rényi-model. Furthermore we concluded that our algorithms are able
to preserve different properties of graphs independently from the used model. To support
the choice of algorithm, we created a guideline which highlights advantages and
disadvantages of discussed methods and their possible use-cases.
BibTeX:
@article{Held2015,
author = {Held, Pascal and Dockhorn, Alexander and Kruse, Rudolf},
doi = {10.1515/jaiscr-2015-0017},
issn = {2083-2567},
journal = {Journal of Artificial Intelligence and Soft Computing Research},
month = {jan},
number = {1},
pages = {23--49},
title = {{On Merging and Dividing Social Graphs}},
url = {http://content.sciendo.com/view/journals/jaiscr/5/1/article-p23.xml},
volume = {5},
year = {2015}
}
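Code sketch (Python):
A hypothetical sketch of one preferential-attachment-style merge of two Barabási-Albert graphs using networkx. The paper describes four merge and five divide strategies, none of which is reproduced exactly here; the function name merge_preferential is invented for this illustration.

import random
import networkx as nx

def merge_preferential(g1, g2, m=2, seed=0):
    """Connect every node of g2 to m nodes of g1, chosen proportionally
    to their degree (preferential attachment)."""
    rng = random.Random(seed)
    merged = nx.disjoint_union(g1, g2)        # relabels nodes as 0..n1+n2-1
    n1 = g1.number_of_nodes()
    targets = list(range(n1))
    weights = [merged.degree(t) + 1 for t in targets]
    for v in range(n1, merged.number_of_nodes()):
        for t in rng.choices(targets, weights=weights, k=m):
            merged.add_edge(v, t)             # duplicate picks collapse to one edge
    return merged

merged = merge_preferential(nx.barabasi_albert_graph(200, 2, seed=1),
                            nx.barabasi_albert_graph(100, 2, seed=2))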
Pascal Held, Alexander Dockhorn, Benjamin Krause, and Rudolf Kruse.
Clustering Social Networks Using Competing Ant Hives.
In: 2015 Second European Network Intelligence Conference.
67–74. https://doi.org/10.1109/ENIC.2015.18
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Methods for clustering static graphs
cannot always be transfered straight forward to dynamic
scenarios. A typical approach is to reduce the
number of updates by reusing results of previous iterations.
But are there natural ways to implement
dynamic graph clustering? This paper proposes a
method which was derived by graph based ant colony
algorithms. Similar to other clustering algorithms,
multiple ant colonies are competing for the available nodes.
Each hive creates ants, which will explore nearby graph structures
and drop hive-specific pheromones on visited nodes. Over time, hives will
collect nodes and will be relocated to the center of
all collected nodes. In case of dynamic graph clustering, pheromone
values can be reused in consecutive
iterations. Our evaluation revealed that the proposed
algorithm can lead to results on a par with the k-median algorithm and performs worse than Louvain
clustering. However competing ant hives have the
advantage of implicit noise detection, which comes at
the cost of longer computation times. This can make
it a suitable choice for certain clustering tasks.
BibTeX:
@inproceedings{Kruse2015,
author = {Held, Pascal and Dockhorn, Alexander and Krause, Benjamin and Kruse, Rudolf},
booktitle = {2015 Second European Network Intelligence Conference},
doi = {10.1109/ENIC.2015.18},
isbn = {978-1-4673-7592-4},
month = {sep},
pages = {67--74},
publisher = {IEEE},
title = {{Clustering Social Networks Using Competing Ant Hives}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7321238},
year = {2015}
}
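Code sketch (Python):
A heavily simplified, hypothetical sketch of the competing-ant-hives idea: each hive sends random walkers that deposit hive-specific pheromone on the nodes they visit, and every node is finally assigned to the hive with the most pheromone. Pheromone evaporation, hive relocation, and the reuse of pheromone values across iterations in the dynamic setting are omitted; the function name ant_hive_clustering is invented here.

import random
import networkx as nx

def ant_hive_clustering(G, n_hives=3, ants_per_hive=50, walk_len=10, seed=0):
    """Assign each node to the hive whose ants left the most pheromone on it."""
    rng = random.Random(seed)
    hives = rng.sample(list(G.nodes), n_hives)            # hive home nodes
    pheromone = {v: [0.0] * n_hives for v in G.nodes}
    for h, home in enumerate(hives):
        for _ in range(ants_per_hive):
            v = home
            for _ in range(walk_len):                     # simple random walk
                v = rng.choice(list(G.neighbors(v)))
                pheromone[v][h] += 1.0
    return {v: max(range(n_hives), key=lambda k: pheromone[v][k])
            for v in G.nodes}

labels = ant_hive_clustering(nx.karate_club_graph())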
2014
Alexander Dockhorn.
Bachelor Thesis: Computergestützte Analyse onkologischer Daten mithilfe Graphischer Modelle (Computer-Aided Analysis of Oncological Data Using Graphical Models).
At: Otto von Guericke University Magdeburg, 2014.
pp. 1-80.
[Abstract]
[BibTeX]
[PDF]
Abstract:
The presented thesis addresses the generation of hypotheses in the area of oncology,
which can be used as a foundation for future clinical studies. For this purpose, an
interactive procedure is presented that divides the set of patients into subsets to
reduce the number of attribute combinations in each subset. This restricts the number
of generated hypotheses to those relevant for the current object of investigation.
Markov networks of the different subsets are computed to give an overview of the
available patient data. Differences and commonalities of the calculated models can be
highlighted in a comparison view, which can suggest further investigations of certain
dependencies. A subsequent association analysis is limited to dependent attributes.
The capabilities of the proposed procedure are illustrated on a test data set,
where the known underlying association rules could be extracted while the search effort
was significantly reduced in comparison to a standard association analysis.
BibTeX:
@phdthesis{Dockhorn2014,
author = {Dockhorn, Alexander},
number = {April},
pages = {1--80},
school = {Otto von Guericke University of Magdeburg},
title = {{Computergest{\"{u}}tzte Analyse onkologischer Daten mithilfe Graphischer Modelle}},
type = {Bachelor Thesis},
year = {2014}
}
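Code sketch (Python):
A hypothetical sketch of the dependency-filtering step described above: pairwise chi-squared tests flag dependent attribute pairs, and a subsequent association analysis would only consider attributes from those pairs. The Markov network estimation and the interactive comparison view are not shown, and dependent_pairs is an invented helper name.

from itertools import combinations
import pandas as pd
from scipy.stats import chi2_contingency

def dependent_pairs(df, alpha=0.05):
    """Return attribute pairs whose chi-squared independence test rejects H0.
    Assumes categorical, non-constant columns."""
    pairs = []
    for a, b in combinations(df.columns, 2):
        table = pd.crosstab(df[a], df[b])
        _, p, _, _ = chi2_contingency(table)
        if p < alpha:
            pairs.append((a, b))
    return pairs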
Pascal Held, Alexander Dockhorn, and Rudolf Kruse.
Generating Events for Dynamic Social Network Simulations.
In: 15th International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems, IPMU 2014.
46–55. https://doi.org/10.1007/978-3-319-08855-6_6
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
Social network analysis has gained remarkable attention in the last decade.
Current analyses focus more and more on the dynamic behavior of such networks.
The underlying structure of social networks, like Facebook or Twitter,
can change over time. Groups can be merged, or single nodes can move from one
group to another. These phenomena do not only occur in social networks
but also in human brains: research on neural spike trains also focuses on
finding functional communities, which can change over time when the stimuli
presented to the subject are switched. In this paper, we introduce
a data generator that creates such dynamic behavior, with effects on the interactions
between nodes. We generate time stamps of events for one-to-one, one-to-many,
and many-to-all relations. This data can be used to demonstrate the functionality
of algorithms on such data, e.g., clustering or visualization algorithms. We
demonstrate that the generated data fulfills common properties of social networks.
BibTeX:
@incollection{Held2014b,
doi = {10.1007/978-3-319-08855-6_6},
url = {https://doi.org/10.1007/978-3-319-08855-6_6},
year = {2014},
publisher = {Springer International Publishing},
pages = {46--55},
author = {Pascal Held and Alexander Dockhorn and Rudolf Kruse},
title = {Generating Events for Dynamic Social Network Simulations},
booktitle = {Information Processing and Management of Uncertainty in Knowledge-Based Systems}
}
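Code sketch (Python):
A hypothetical sketch of generating event time stamps between nodes: each one-to-one pair or one-to-many hub gets a homogeneous Poisson process, i.e. exponentially distributed inter-event times. The paper's generator additionally covers many-to-all relations and changing community structure, which are omitted here; poisson_events and the example node indices are made up for this illustration.

import random

def poisson_events(rate, t_end, rng):
    """Time stamps of a homogeneous Poisson process with the given rate on [0, t_end]."""
    t, events = 0.0, []
    while True:
        t += rng.expovariate(rate)
        if t > t_end:
            return events
        events.append(t)

rng = random.Random(0)
# one-to-one: events between a fixed pair of nodes
one_to_one = {(0, 1): poisson_events(rate=0.5, t_end=100, rng=rng)}
# one-to-many: a hub node contacts a randomly chosen partner at each event time
one_to_many = {2: [(t, rng.randrange(3, 10))
                   for t in poisson_events(rate=1.0, t_end=100, rng=rng)]}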
Pascal Held, Alexander Dockhorn, and Rudolf Kruse.
On Merging and Dividing of Barabási-Albert-graphs.
In: 2014 IEEE Symposium on Evolving and Autonomous Learning Systems (EALS).
444, 17–24. https://doi.org/10.1109/EALS.2014.7009499
[Abstract]
[BibTeX]
[URL]
[PDF]
Abstract:
The Barabási-Albert-model is commonly used to generate scale-free graphs, such as
social networks. To generate dynamics in these networks, methods for altering such
graphs are needed. Growing and shrinking are done simply by performing further
generation iterations or undoing them. In our paper, we present four methods to merge
two graphs based on the Barabási-Albert-model, and five strategies to reverse them.
First, we compared these algorithms by edge preservation, which describes the ratio
of the inner structure kept after the alteration. To check whether hubs in the initial
graphs remain hubs in the resulting graphs, we used the node-degree rank correlation.
Finally, we tested how well the node-degree distribution follows the power-law
function of the Barabási-Albert-model.
BibTeX:
@inproceedings{Held2014,
author = {Held, Pascal and Dockhorn, Alexander and Kruse, Rudolf},
booktitle = {2014 IEEE Symposium on Evolving and Autonomous Learning Systems (EALS)},
doi = {10.1109/EALS.2014.7009499},
isbn = {978-1-4799-4494-1},
issn = {18650929},
pages = {17--24},
title = {{On Merging and Dividing of Barabasi-Albert-graphs}},
volume = {444},
year = {2014}
}
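Code sketch (Python):
A sketch of two of the evaluation measures mentioned above, under the assumption that nodes keep their identifiers when a graph is altered: edge preservation as the fraction of original edges still present, and the Spearman rank correlation between node degrees before and after the alteration. The node removal in the example merely stands in for one of the paper's divide strategies.

import networkx as nx
from scipy.stats import spearmanr

def edge_preservation(original, altered):
    """Fraction of the original edges that still exist in the altered graph."""
    kept = sum(1 for e in original.edges if altered.has_edge(*e))
    return kept / original.number_of_edges()

def degree_rank_correlation(original, altered):
    """Spearman rank correlation of node degrees before and after alteration."""
    nodes = [v for v in original.nodes if v in altered]
    before = [original.degree(v) for v in nodes]
    after = [altered.degree(v) for v in nodes]
    rho, _ = spearmanr(before, after)
    return rho

g = nx.barabasi_albert_graph(300, 2, seed=0)
h = g.copy()
h.remove_nodes_from(list(h.nodes)[:150])      # stand-in for a divide strategy
print(edge_preservation(g, h), degree_rank_correlation(g, h))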