branches/1.1/src/haizea/core/frontends/tracefile.py @ 847

# -------------------------------------------------------------------------- #
# Copyright 2006-2009, University of Chicago                                 #
# Copyright 2008-2009, Distributed Systems Architecture Group, Universidad   #
# Complutense de Madrid (dsa-research.org)                                   #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
# -------------------------------------------------------------------------- #

import haizea.common.constants as constants
from haizea.common.utils import get_clock
from haizea.core.frontends import RequestFrontend
from haizea.core.leases import LeaseWorkload, Lease, DiskImageSoftwareEnvironment, LeaseAnnotations
import operator
import logging

class TracefileFrontend(RequestFrontend):
    def __init__(self, starttime):
        RequestFrontend.__init__(self)
        self.logger = logging.getLogger("TFILE")
        self.starttime = starttime

    def load(self, manager):
        config = manager.config

        tracefile = config.get("tracefile")
        injectfile = config.get("injectionfile")
        annotationfile = config.get("annotationfile")

        # Read trace file
        # Requests is a list of lease requests
        self.logger.info("Loading tracefile %s" % tracefile)
        self.requests = None
        lease_workload = LeaseWorkload.from_xml_file(tracefile, self.starttime)
        self.requests = lease_workload.get_leases()

        if injectfile != None:
            self.logger.info("Loading injection file %s" % injectfile)
            inj_lease_workload = LeaseWorkload.from_xml_file(injectfile, self.starttime)
            inj_leases = inj_lease_workload.get_leases()
            for l in inj_leases:
                l.id += 1000000
            self.requests += inj_leases
            self.requests.sort(key=operator.attrgetter("submit_time"))

        if annotationfile != None:
            self.logger.info("Loading annotation file %s" % annotationfile)
            annotations = LeaseAnnotations.from_xml_file(annotationfile)
            annotations.apply_to_leases(self.requests)

        # Add runtime overhead, if necessary
        add_overhead = config.get("add-overhead")

        if add_overhead != constants.RUNTIMEOVERHEAD_NONE:
            slowdown_overhead = config.get("runtime-slowdown-overhead")
            boot_overhead = config.get("bootshutdown-overhead")
            for r in self.requests:
                if add_overhead == constants.RUNTIMEOVERHEAD_ALL or (add_overhead == constants.RUNTIMEOVERHEAD_BE and r.get_type() == Lease.BEST_EFFORT):
                    if slowdown_overhead != 0:
                        r.add_runtime_overhead(slowdown_overhead)
                    r.add_boot_overhead(boot_overhead)

        # Override requested memory, if necessary
        memory = config.get("override-memory")
        if memory != constants.NO_MEMORY_OVERRIDE:
            for r in self.requests:
                for n in r.requested_resources:
                    r.requested_resources[n].set_quantity(constants.RES_MEM, memory)

        types = {}
        for r in self.requests:
            types[r.get_type()] = types.setdefault(r.get_type(), 0) + 1
        types_str = " + ".join(["%i %s" % (types[t], Lease.type_str[t]) for t in types])

        self.logger.info("Loaded workload with %i requests (%s)" % (len(self.requests), types_str))


    def get_accumulated_requests(self):
        # When reading from a trace file, there are no
        # "accumulated requests". Rather, we just take whatever
        # requests are in the trace up to the current time
        # reported by the resource manager
        time = get_clock().get_time()
        nowreq = [r for r in self.requests if r.submit_time <= time]
        self.requests = [r for r in self.requests if r.submit_time > time]
        return nowreq

    def exists_more_requests(self):
        return len(self.requests) != 0

    def get_next_request_time(self):
        if self.exists_more_requests():
            return self.requests[0].submit_time
        else:
            return None
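
A minimal usage sketch, assuming a stand-in manager whose config.get() returns the options that load() reads. StubConfig, StubManager, the trace path, and the starttime variable below are hypothetical stand-ins for illustration; in Haizea itself the manager supplies the real configuration and advances the simulation clock between calls to get_accumulated_requests().

# Hypothetical stand-ins; not part of Haizea.
class StubConfig(object):
    def __init__(self, values):
        self.values = values
    def get(self, name):
        return self.values.get(name)

class StubManager(object):
    def __init__(self, config):
        self.config = config

config = StubConfig({"tracefile"      : "mytrace.lwf",   # hypothetical trace file
                     "injectionfile"  : None,
                     "annotationfile" : None,
                     "add-overhead"   : constants.RUNTIMEOVERHEAD_NONE,
                     "override-memory": constants.NO_MEMORY_OVERRIDE})

frontend = TracefileFrontend(starttime)   # starttime: the simulation start time
frontend.load(StubManager(config))

while frontend.exists_more_requests():
    # The real manager advances the clock before asking again.
    for lease in frontend.get_accumulated_requests():
        pass  # hand each pending lease request to the scheduler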