- Update Xen patches to c/s 1011.
[opensuse:kernel-source.git] / patches.xen / xen-x86-dcr-fallback
1 Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
2 From: jbeulich@novell.com
3 Patch-mainline: obsolete
4 References: 181869
5
6 When XENMEM_exchange fails, fall back to handing ordinary (ballooned-out) pages
7 back to Xen one at a time, so precious special (machine-contiguous) memory is not
lost in places where any memory can be used.
8
9 --- sle11sp1-2010-03-29.orig/arch/x86/mm/hypervisor.c   2009-11-06 10:52:02.000000000 +0100
10 +++ sle11sp1-2010-03-29/arch/x86/mm/hypervisor.c        2009-06-09 15:52:17.000000000 +0200
11 @@ -43,6 +43,7 @@
12  #include <xen/interface/memory.h>
13  #include <linux/module.h>
14  #include <linux/percpu.h>
15 +#include <linux/highmem.h>
16  #include <asm/tlbflush.h>
17  #include <linux/highmem.h>
18  
19 @@ -719,6 +720,83 @@ void xen_destroy_contiguous_region(unsig
20                 BUG();
21  
22         balloon_unlock(flags);
23 +
24 +       if (unlikely(!success)) {
25 +               /* Try hard to get the special memory back to Xen. */
26 +               exchange.in.extent_order = 0;
27 +               set_xen_guest_handle(exchange.in.extent_start, &in_frame);
28 +
29 +               for (i = 0; i < (1U<<order); i++) {
30 +                       struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
31 +                       unsigned long pfn;
32 +                       mmu_update_t mmu;
33 +                       unsigned int j = 0;
34 +
35 +                       if (!page) {
36 +                               printk(KERN_WARNING "Xen and kernel out of memory "
37 +                                      "while trying to release an order %u "
38 +                                      "contiguous region\n", order);
39 +                               break;
40 +                       }
41 +                       pfn = page_to_pfn(page);
42 +
43 +                       balloon_lock(flags);
44 +
45 +                       if (!PageHighMem(page)) {
46 +                               void *v = __va(pfn << PAGE_SHIFT);
47 +
48 +                               scrub_pages(v, 1);
49 +                               MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
50 +                                                       __pte_ma(0), UVMF_INVLPG|UVMF_ALL);
51 +                               ++j;
52 +                       }
53 +#ifdef CONFIG_XEN_SCRUB_PAGES
54 +                       else {
55 +                               scrub_pages(kmap(page), 1);
56 +                               kunmap(page);
57 +                               kmap_flush_unused();
58 +                       }
59 +#endif
60 +
61 +                       frame = pfn_to_mfn(pfn);
62 +                       set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
63 +
64 +                       MULTI_update_va_mapping(cr_mcl + j, vstart,
65 +                                               pfn_pte_ma(frame, PAGE_KERNEL),
66 +                                               UVMF_INVLPG|UVMF_ALL);
67 +                       ++j;
68 +
69 +                       pfn = __pa(vstart) >> PAGE_SHIFT;
70 +                       set_phys_to_machine(pfn, frame);
71 +                       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
72 +                               mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
73 +                               mmu.val = pfn;
74 +                               cr_mcl[j].op = __HYPERVISOR_mmu_update;
75 +                               cr_mcl[j].args[0] = (unsigned long)&mmu;
76 +                               cr_mcl[j].args[1] = 1;
77 +                               cr_mcl[j].args[2] = 0;
78 +                               cr_mcl[j].args[3] = DOMID_SELF;
79 +                               ++j;
80 +                       }
81 +
82 +                       cr_mcl[j].op = __HYPERVISOR_memory_op;
83 +                       cr_mcl[j].args[0] = XENMEM_decrease_reservation;
84 +                       cr_mcl[j].args[1] = (unsigned long)&exchange.in;
85 +
86 +                       if (HYPERVISOR_multicall(cr_mcl, j + 1))
87 +                               BUG();
88 +                       BUG_ON(cr_mcl[j].result != 1);
89 +                       while (j--)
90 +                               BUG_ON(cr_mcl[j].result != 0);
91 +
92 +                       balloon_unlock(flags);
93 +
94 +                       free_empty_pages(&page, 1);
95 +
96 +                       in_frame++;
97 +                       vstart += PAGE_SIZE;
98 +               }
99 +       }
100  }
101  EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
102  
103 --- sle11sp1-2010-03-29.orig/drivers/xen/balloon/balloon.c      2010-03-31 10:00:17.000000000 +0200
104 +++ sle11sp1-2010-03-29/drivers/xen/balloon/balloon.c   2010-03-31 10:00:24.000000000 +0200
105 @@ -776,7 +776,11 @@ struct page **alloc_empty_pages_and_page
106  }
107  EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
108  
109 -void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
110 +#endif /* CONFIG_XEN_BACKEND */
111 +
112 +#ifdef CONFIG_XEN
113 +static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages,
114 +                                         bool free_vec)
115  {
116         unsigned long flags;
117         int i;
118 @@ -787,17 +791,33 @@ void free_empty_pages_and_pagevec(struct
119         balloon_lock(flags);
120         for (i = 0; i < nr_pages; i++) {
121                 BUG_ON(page_count(pagevec[i]) != 1);
122 -               balloon_append(pagevec[i], 0);
123 +               balloon_append(pagevec[i], !free_vec);
124 +       }
125 +       if (!free_vec) {
126 +               bs.current_pages -= nr_pages;
127 +               totalram_pages = bs.current_pages - totalram_bias;
128         }
129         balloon_unlock(flags);
130  
131 -       kfree(pagevec);
132 +       if (free_vec)
133 +               kfree(pagevec);
134  
135         schedule_work(&balloon_worker);
136  }
137 -EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
138  
139 -#endif /* CONFIG_XEN_BACKEND */
140 +void free_empty_pages(struct page **pagevec, int nr_pages)
141 +{
142 +       _free_empty_pages_and_pagevec(pagevec, nr_pages, false);
143 +}
144 +#endif
145 +
146 +#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
147 +void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
148 +{
149 +       _free_empty_pages_and_pagevec(pagevec, nr_pages, true);
150 +}
151 +EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
152 +#endif
153  
154  void balloon_release_driver_page(struct page *page)
155  {
156 --- sle11sp1-2010-03-29.orig/include/xen/balloon.h      2009-11-06 10:51:32.000000000 +0100
157 +++ sle11sp1-2010-03-29/include/xen/balloon.h   2009-06-09 15:52:17.000000000 +0200
158 @@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon
159  struct page **alloc_empty_pages_and_pagevec(int nr_pages);
160  void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
161  
162 +/* Free an empty page range (not allocated through
163 +   alloc_empty_pages_and_pagevec), adding to the balloon. */
164 +void free_empty_pages(struct page **pagevec, int nr_pages);
165 +
166  void balloon_release_driver_page(struct page *page);
167  
168  /*