<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/search_rss">
  <title>Centre for Internet and Society</title>
  <link>https://cis-india.org</link>
  
  <description>
    
            These are the search results for the query, showing results 251 to 265.
        
  </description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/a2k/files/wikisource-handbook-for-indian-communities"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/about/newsletters/august-2018-newsletter"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/raw/essays-on-offline-selected-abstracts"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/files/analysis-of-cloud-act-and-implications-for-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/files/indias-contribution-to-internet-governance-debates"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/files/normative-regulation-of-cyber-space-report"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/about/newsletters/july-2018-newsletter"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/a2k/files/wikisource-handbook-for-indian-communities">
    <title>Wikisource Handbook for Indian Communities</title>
    <link>https://cis-india.org/a2k/files/wikisource-handbook-for-indian-communities</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/a2k/files/wikisource-handbook-for-indian-communities'&gt;https://cis-india.org/a2k/files/wikisource-handbook-for-indian-communities&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-09-19T01:52:09Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/about/newsletters/august-2018-newsletter">
    <title>August 2018 Newsletter</title>
    <link>https://cis-india.org/about/newsletters/august-2018-newsletter</link>
    <description>
        &lt;b&gt;CIS newsletter for the month of August 2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;&lt;span&gt;Dear readers,&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Previous issues of the newsletters can be &lt;a class="external-link" href="http://cis-india.org/about/newsletters"&gt;accessed here&lt;/a&gt;.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Highlights&lt;/h2&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;India houses the second largest population in the world at approximately 1.35 billion individuals. In such a diverse and dense context, law enforcement could be a challenging job. Elonnai Hickok and Vipul Kharbanda &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/an-analysis-of-the-cloud-act-and-implications-for-india"&gt;throw light on the CLOUD Act and its implications for India in a blog post&lt;/a&gt;. &lt;/li&gt;
&lt;li style="text-align: justify; "&gt;On August 9, 2018, the DNA Technology (Use and Application) Regulation Bill, 2018 was introduced in the Lok Sabha. CIS had commented on some key aspects of the bill in many forums earlier. Elonnai Hickok and Murali Neelakantan in an article &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/bloomberg-quint-elonnai-hickok-and-murali-neelakantan-august-20-2018-dna-evidence-only-opinion-not-science-and-definitely-not-proof-of-crime"&gt;published by Bloomberg Quint&lt;/a&gt; have voiced their opinion on the bill.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;Murali Neelakantan, Swaraj Barooah, Swagam Dasgupta and Torsha Sarkar in an &lt;a class="external-link" href="http://cis-india.org/internet-governance/blog/bloomberg-quint-murali-neelakantan-swaraj-barooah-swagam-dasgupta-torsha-sarkar-august-14-2018-national-health-stack-data-for-datas-sake-a-manmade-health-hazard"&gt;Op-ed in Bloomberg Quint&lt;/a&gt; have examined the National Health Stack, an ambitious attempt by the government to build a digital infrastructure with a “deep understanding of the incentive structures prevalent in the Indian healthcare ecosystem”. The authors have argued that collection of health data, without sensitisation and accountability, has the potential to deny healthcare to the vulnerable.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;An article titled &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/nlud-student-law-journal-sunil-abraham-mukta-batra-geetha-hariharan-swaraj-barooah-and-akriti-bopanna-indias-contribution-to-internet-governance-debates"&gt;India's Contribution to Internet Governance Debates&lt;/a&gt;, co-authored by Sunil Abraham, Mukta Batra, Geetha Hariharan, Swaraj Barooah and Akriti Bopanna, was published in the NLUD Student Law Journal, an annual peer-reviewed journal published by the National Law University, Delhi. &lt;/li&gt;
&lt;li style="text-align: justify; "&gt;IT/IT-eS Sector and the Future of Work in India was organized at Omidyar Networks’ office in Bangalore, on June 29, 2018. Torsha Sarkar, Ambika Tandon and Aayush Rath &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/future-of-work-report-of-the-workshop-on-the-it-it-es-sector-and-the-future-of-work-in-india"&gt;co-authored a report&lt;/a&gt;.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;Swaraj Barooah and Gurshabad Grover &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/livemint-july-24-2018-swaraj-barooah-and-gurshabad-grover-anti-trafficking-bill-may-lead-to-censorship"&gt;co-authored an article in Livemint&lt;/a&gt; that examines a few problematic provisions in the proposed Anti-trafficking Bill. The authors say that it may lead to censorship.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;The Researchers at Work programme of CIS &lt;a class="external-link" href="https://cis-india.org/raw/call-for-essays-offline"&gt;had invited abstracts for essays&lt;/a&gt; that explore dimensions of offline lives. Selected authors are expected to submit the first draft of the essay (2000-4000 words) by Friday, October 5, 2018.&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;Articles&lt;/h2&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/livemint-july-24-2018-swaraj-barooah-and-gurshabad-grover-anti-trafficking-bill-may-lead-to-censorship"&gt;Anti-trafficking Bill may lead to censorship&lt;/a&gt; (Swaraj Barooah and Gurshabad Grover; Livemint; July 24, 2018). &lt;i&gt;The article was mirrored on CIS website in the month of August 2018&lt;/i&gt;.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/bloomberg-quint-august-6-2018-murali-neelakantan-swaraj-barooah-swagam-dasgupta-torsha-sarkar-national-health-stack-an-expensive-temporary-placebo"&gt;The National Health Stack: An Expensive, Temporary Placebo&lt;/a&gt; (Murali Neelakantan, Swaraj Barooah, Swagam Dasgupta, and Torsha Sarkar; Bloomberg Quint; August 6, 2018).&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/raw/indian-express-august-12-2018-nishant-shah-digital-native-double-speak"&gt;Digital Native: Double Speak&lt;/a&gt; (Nishant Shah; Indian Express; August 12, 2018).&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/nlud-student-law-journal-sunil-abraham-mukta-batra-geetha-hariharan-swaraj-barooah-and-akriti-bopanna-indias-contribution-to-internet-governance-debates"&gt;India's Contribution to Internet Governance Debates&lt;/a&gt; (Sunil Abraham, Mukta Batra, Geetha Hariharan, Swaraj Barooah and Akriti Bopanna; NLUD Student Law Journal; August 16, 2018).&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/blog/bloomberg-quint-murali-neelakantan-swaraj-barooah-swagam-dasgupta-torsha-sarkar-august-14-2018-national-health-stack-data-for-datas-sake-a-manmade-health-hazard"&gt;National Health Stack: Data for Data's Sake, A Manmade Health Hazard &lt;/a&gt;(Murali Neelakantan, Swaraj Barooah, Swagam Dasgupta and Torsha Sarkar; Bloomberg Quint; August 17, 2018).&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/bloomberg-quint-elonnai-hickok-and-murali-neelakantan-august-20-2018-dna-evidence-only-opinion-not-science-and-definitely-not-proof-of-crime"&gt;DNA ‘Evidence’: Only Opinion, Not Science, And Definitely Not Proof Of Crime!&lt;/a&gt; (Elonnai Hickok and Murali Neelakantan; Bloomberg Quint; August 22, 2018).&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/raw/indian-express-august-26-2018-nishant-shah-digital-native-playing-god"&gt;Digital Native: Playing God&lt;/a&gt; (Nishant Shah; Indian Express; August 26, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h2&gt;CIS in the News&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/undp-august-1-2018-undp-joins-tech-giants-in-partnership-on-ai"&gt;UNDP joins Tech Giants in Partnership on AI&lt;/a&gt; (UNDP; August 1, 2018). CIS is one of the partners.&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/livemint-august-3-2018-uidai-says-asked-nobody-to-add-the-helpline-number-to-contacts"&gt;UIDAI says asked nobody to add the helpline number to contacts&lt;/a&gt; (Komal Gupta; Livemint; August 3, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/economic-times-august-10-2018-mugdha-variyar"&gt;How Chinese apps are making inroads in Indian small towns&lt;/a&gt; (Mugdha Variyar; Economic Times; August 10, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/factor-daily-anand-murali-august-13-2018-the-big-eye"&gt;The Big Eye: The tech is all ready for mass surveillance in India&lt;/a&gt; (Anand Murali; Factor Daily; August 13, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/hindustan-times-august-21-2018-centre-draws-red-lines-for-whatsapp-over-fake-news-says-must-comply-with-indian-laws"&gt;Centre draws red lines for Whatsapp over fake news, says must comply with Indian laws&lt;/a&gt; (Nakul Sridhar; Hindustan Times; August 21, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/the-straits-times-august-24-2018-debarshi-dasgupta-india-steps-up-vigilance-against-whatsapp-abuse"&gt;India steps up vigilance against WhatsApp abuse&lt;/a&gt; (Debarshi Dasgupta; Straits Times; August 24, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state"&gt;India’s Biometric Database Is Creating A Perfect Surveillance State — And U.S. Tech Companies Are On Board&lt;/a&gt; (Paul Bluementhal and Gopal Sathe; Huffington Post; August 25, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/hindustan-times-rachel-lopez-august-26-2018-20-years-of-google-privacy-fake-news-and-future"&gt;20 years of Google: Privacy, fake news and the future&lt;/a&gt; (Rachel Lopez; Hindustan Times; August 26, 2018).&lt;br /&gt;&lt;br /&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;&lt;a href="http://cis-india.org/a2k"&gt;Access to Knowledge&lt;/a&gt;&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Our Access to Knowledge programme currently consists of two projects.  The Pervasive Technologies project, conducted under a grant from the  International Development Research Centre (IDRC), aims to conduct  research on the complex interplay between low-cost pervasive  technologies and intellectual property, in order to encourage the  proliferation and development of such technologies as a social good. The  Wikipedia project, which is under a grant from the Wikimedia  Foundation, is for the growth of Indic language communities and projects  by designing community collaborations and partnerships that recruit and  cultivate new editors and explore innovative approaches to building  projects.&lt;/p&gt;
&lt;h3&gt;Wikipedia&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entry&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/c2ec3fc38c3fc2ec3f-c2ac24c4dc30c3fc15-c17c4dc30c02c25c3ec32c2fc02c32c4b-c24c46c32c41c17c41-c35c3fc15c40c2ac40c21c3fc2fc28c4dc32-c15c3ec30c4dc2fc15c4dc30c2ec02"&gt;మిసిమి పత్రిక గ్రంథాలయంలో తెలుగు వికీపీడియన్ల కార్యక్రమం&lt;/a&gt; (Pavan Santhosh; August 22, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;b&gt;Events Organized&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/partnership-activity-in-annamayya-library-guntur"&gt;వికీపీడియా:సమావేశం/గుంటూరు/అన్నమయ్య గ్రంథాలయం - భాగస్వామ్య కార్యక్రమం జూలై 2018&lt;/a&gt; (Organized by CIS-A2K; Annamaya Library; Guntur; July 10, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects"&gt;Workshop of Publishers and Writers on Unicode, Open Source and Wikimedia Projects&lt;/a&gt; (Organized by CIS-A2K; Pune; July 25, 2018). &lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water"&gt;Workshop of River activists for building Jal Bodh - Knowledge resource on Water&lt;/a&gt; (Organized by CIS-A2K; Pune; July 25, 2018). &lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/tumakur%20university-workshop"&gt;ವಿಕಿಪೀಡಿಯ:ಸಂಪಾದನೋತ್ಸವಗಳು/ಸಂಪಾದನೋತ್ಸವ ತುಮಕೂರು ವಿಶ್ವವಿದ್ಯಾನಿಲಯ ೨೦೧೮ &lt;/a&gt;(Organized by CIS-A2K; Tumakur University; July 25, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Intensive_Personalised_Wiki_Training_Session_at_Pune"&gt;Intensive Personalised Wiki Training Session at Pune&lt;/a&gt; (Organized by CIS-A2K; August 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Wikisource_and_Wiki_technical_session_at_MKCL,_Pune"&gt;Wikisource and Wiki technical session at MKCL&lt;/a&gt; (Organized by CIS-A2K; Pune; August 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Wiki_technical_orientation_session_with_PyLadies_group_at_Cummins_College_of_Engineering,_Pune"&gt;Wiki technical orientation session with PyLadies group&lt;/a&gt; (Organized by CIS-A2K; Cummins College of Engineering, Pune; August 7, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Indian_Independence_Struggle_Edit-a-thon_on_Marathi_Wikipedia"&gt;Indian Independence Struggle Edit-a-thon on Marathi Wikipedia&lt;/a&gt; (Organized by CIS-A2K; August 10 - 20, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Event Participation&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/partnership-discussions-with-misimi-telugu-monthly-magazine"&gt;వికీపీడియా:సమావేశం/హైదరాబాదు/మిసిమి పత్రిక భాగస్వామ్య సమావేశం, జూలై 2018&lt;/a&gt; (July 24, 2018). CIS-A2K held partnership discussions with Misimi Telugu monthly magazine. &lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;div&gt;Note: &lt;i&gt;Event reports for all these were published in the month of August 2018&lt;/i&gt;.&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;h2&gt;&lt;a href="http://cis-india.org/internet-governance"&gt;Internet Governance&lt;/a&gt;&lt;/h2&gt;
&lt;p&gt;As part of its research on privacy and free speech, CIS is engaged with  two different projects. The first one (under a grant from Privacy  International and IDRC) is on surveillance and freedom of expression  (SAFEGUARDS). The second one (under a grant from MacArthur Foundation)  is on restrictions that the Indian government has placed on freedom of  expression online.&lt;/p&gt;
&lt;h3&gt;Privacy&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entries&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/use-of-visuals-and-nudges-in-privacy-notices"&gt;Use of Visuals and Nudges in Privacy Notices&lt;/a&gt; (Saumyaa Naidu; edited by Elonnai Hickok and Amber Sinha; August 18, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/an-analysis-of-the-cloud-act-and-implications-for-india"&gt;An Analysis of the CLOUD Act and Implications for India&lt;/a&gt; (Elonnai Hickok and Vipul Kharbanda; August 22, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/consumer-care-society-silver-jubilee-year-celebrations"&gt;Consumer Care Society: Silver Jubilee Year Celebrations&lt;/a&gt; (Arindrajit Basu; August 27, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Event Participation&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment"&gt;Celebrating One Year of the Justice K.S. Puttaswamy v. Union of India Judgment&lt;/a&gt; (Organized by Indian Council for Research on International Economic Relations and Centre for Communication Governance at National Law University - Delhi; India International Centre; New Delhi; August 24, 2018). Shweta Mohandas was a panelist at the event.&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h3&gt;Free Speech &amp;amp; Expression&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entry&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/icann-response-to-didp-31-on-diversity"&gt;ICANN response to DIDP #31 on diversity&lt;/a&gt; (Akriti Bopanna and Akash Sriram; August 21, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Event Participation&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/feminist-information-infrastructure-workshop-with-blank-noise-and-sangama"&gt;Feminist Information Infrastructure Workshop with Blank Noise and Sangama&lt;/a&gt; (Organized by Sangama and Blank Noise; CIS, Bangalore; August 8, 2018). Akriti Bopanna, Swaraj Paul Barooah and Ambika Tandon conducted the workshop.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/summer-school-on-disinformation"&gt;Summer School on Disinformation&lt;/a&gt; (Organized by Digital Asia Hub, Hans-Bredow-Institut, University of Hamburg, Institute for Technology &amp;amp; Society of Rio de Janeiro - ITS Rio and Berkman Klein Center for Internet and Society at Harvard University; Azure Room, Pullman, Jakarta; August 22 - 24, 2018). Sunil Abraham made a presentation on Disinformation and Online Recruitment.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018"&gt;World Library and Information Congress 2018&lt;/a&gt; (Organized by International Federation of Library Associations and Institutions; Kuala Lumpur; August 26 - 27, 2018). Swaraj Paul Barooah was a speaker at two panels. Swaraj's first panel, titled "Intellectual Freedom in a Polarised World" was selected as one of 9 sessions to be live-streamed and recorded, out of 249 sessions in total. The recording can be accessed on &lt;a class="external-link" href="https://www.youtube.com/watch?v=0HujFHQn1zY"&gt;YouTube&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h3&gt;Information Technology&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entry&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/future-of-work-report-of-the-workshop-on-the-it-it-es-sector-and-the-future-of-work-in-india"&gt;Future of Work: Report of the ‘Workshop on the IT/IT-eS Sector and the Future of Work in India’&lt;/a&gt; (Torsha Sarkar, Ambika Tandon and Aayush Rath; edited by Elonnai Hickok. Akash Sriram and Divya Kushwaha; August 16, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;a href="http://cis-india.org/raw"&gt;Researchers at Work&lt;/a&gt;&lt;/span&gt;&lt;/span&gt;&lt;/h2&gt;
&lt;p&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;The Researchers at Work (RAW) programme is an interdisciplinary research initiative driven by an emerging need to understand the reconfigurations of social practices and structures through the Internet and digital media technologies, and vice versa. It aims to produce local and contextual accounts of interactions, negotiations, and resolutions between the Internet, and socio-material and geo-political processes:&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/raw/call-for-essays-offline"&gt;Call for Essays: Offline&lt;/a&gt; (P.P. Sneha; August 6, 2018). Selected authors are expected to submit the first draft of the essay (2000-4000 words) by Friday, October 5, 2018.&lt;/span&gt;&lt;/span&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr /&gt;
&lt;h2&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;a href="http://cis-india.org/"&gt;About CIS&lt;/a&gt;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;The Centre for Internet and  Society (CIS) is a non-profit organisation that undertakes  interdisciplinary research on internet and digital technologies from  policy and academic perspectives. The areas of focus include digital  accessibility for persons with disabilities, access to knowledge,  intellectual property rights, openness (including open data, free and  open source software, open standards, open access, open educational  resources, and open video), internet governance, telecommunication  reform, digital privacy, and cyber-security. The academic research at  CIS seeks to understand the reconfigurations of social and cultural  processes and structures as mediated through the internet and digital  media technologies.&lt;/p&gt;
&lt;p&gt;► Follow us elsewhere&lt;/p&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;Twitter:&lt;a href="http://twitter.com/cis_india"&gt; http://twitter.com/cis_india&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Twitter - Access to Knowledge: &lt;a href="https://twitter.com/CISA2K"&gt;https://twitter.com/CISA2K&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Twitter - Information Policy: &lt;a href="https://twitter.com/CIS_InfoPolicy"&gt;https://twitter.com/CIS_InfoPolicy&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Facebook - Access to Knowledge:&lt;a href="https://www.facebook.com/cisa2k"&gt; https://www.facebook.com/cisa2k&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;E-Mail - Access to Knowledge: &lt;a&gt;a2k@cis-india.org&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;E-Mail - Researchers at Work: &lt;a&gt;raw@cis-india.org&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;List - Researchers at Work: &lt;a href="https://lists.ghserv.net/mailman/listinfo/researchers"&gt;https://lists.ghserv.net/mailman/listinfo/researchers&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;p&gt;► Support Us&lt;/p&gt;
&lt;div&gt;Please help us defend consumer and citizen rights on the Internet!  Write a cheque in favour of 'The Centre for Internet and Society' and  mail it to us at No. 194, 2nd 'C' Cross, Domlur, 2nd Stage, Bengaluru -  5600 71.&lt;/div&gt;
&lt;p&gt;► Request for Collaboration&lt;/p&gt;
&lt;div&gt;
&lt;p style="text-align: justify; "&gt;We invite researchers, practitioners, artists, and theoreticians,  both organisationally and as individuals, to engage with us on topics  related internet and society, and improve our collective understanding  of this field. To discuss such possibilities, please write to Sunil  Abraham, Executive Director, at sunil@cis-india.org (for policy research), or Sumandro Chattapadhyay, Research Director, at sumandro@cis-india.org (for  academic research), with an indication of the form and the content of  the collaboration you might be interested in. To discuss collaborations  on Indic language Wikipedia projects, write to Tanveer Hasan, Programme  Officer, at &lt;a&gt;tanveer@cis-india.org&lt;/a&gt;.&lt;/p&gt;
&lt;div style="text-align: justify; "&gt;&lt;i&gt;CIS is grateful to its primary donor the Kusuma Trust founded  by Anurag Dikshit and Soma Pujari, philanthropists of Indian origin for  its core funding and support for most of its projects. CIS is also  grateful to its other donors, Wikimedia Foundation, Ford Foundation,  Privacy International, UK, Hans Foundation, MacArthur Foundation, and  IDRC for funding its various projects&lt;/i&gt;.&lt;/div&gt;
&lt;/div&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/about/newsletters/august-2018-newsletter'&gt;https://cis-india.org/about/newsletters/august-2018-newsletter&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>praskrishna</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2018-09-16T05:08:39Z</dc:date>
   <dc:type>Page</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/raw/essays-on-offline-selected-abstracts">
    <title>Essays on 'Offline' - Selected Abstracts</title>
    <link>https://cis-india.org/raw/essays-on-offline-selected-abstracts</link>
    <description>
        &lt;b&gt;In response to a recent call for essays that explore various dimensions of offline lives, we received 22 abstracts. Out of these, we have selected 10 pieces to be published as part of a series titled 'Offline' on the upcoming r@w blog. Please find below the details of the selected abstracts.&lt;/b&gt;
        
&lt;p&gt;&amp;nbsp;&lt;/p&gt;
&lt;h4&gt;1. &lt;a href="#chinar"&gt;Chinar Mehta&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;2. &lt;a href="#cole"&gt;Cole Flor&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;3. &lt;a href="#elishia"&gt;Elishia Vaz&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;4. &lt;a href="#karandeep"&gt;Karandeep Mehra&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;5. &lt;a href="#preeti"&gt;Preeti Mudliar&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;6. &lt;a href="#rianka"&gt;Rianka Roy&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;7. &lt;a href="#simiran"&gt;Simiran Lalvani&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;8. &lt;a href="#srikanth"&gt;Srikanth Lakshmanan&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;9. &lt;a href="#titiksha"&gt;Titiksha Vashist&lt;/a&gt;&lt;/h4&gt;
&lt;h4&gt;10. &lt;a href="#yenn"&gt;Dr. Yenn Lee&lt;/a&gt;&lt;/h4&gt;
&lt;hr /&gt;
&lt;h3 id="chinar"&gt;&lt;strong&gt;Chinar Mehta&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;In September 2017, a student of Banaras Hindu University was allegedly sexually harassed by two persons on a motorcycle while she was walking back to her hostel. Taking the discourse around this event as the starting point, the essay argues that the solutions offered for the safety of women align with the patriarchal notions of surveillance of women. The victim is twice violated; once during the act of sexual harassment, and twice when bodily privacy is exchanged for safety (exemplified by security cameras across the BHU campus). In fact, the ubiquitous presence of security cameras in order to control crime rates makes the safety of the woman’s body contingent to her adherence to social rules.&lt;/p&gt;
&lt;p&gt;The moral panic around the safety of women encourages ways to offer a technological solution to a sociological problem. The body is granted safety insofar as the body is not ‘deviant’. There is a fusion of a ‘synoptic-panoptic’ vision, where not only a few watch the many, but the many also watch the few. Additionally, the essay then engages with the politics of mobile applications like Harassmap or Safetipin, and how offline spaces become online entities with crowdsourced data about how safe it is. Mapping events like sexual harassment on an online map is inscribed with perceptions about class and caste. The caste-patriarchal ideas of the protection of upper-caste women is maintained within these applications. The location and the people who visit or reside in them often collapse as the same; as being perpetrators of sexual crimes, while decontextualising incidents. Instead of a focus on how to make areas safer for all women, the discourse becomes about the avoidance of certain spaces, which may not be an option for the majority of women, especially those belonging to certain castes and classes. Features in mobile applications, specifically to do with location mapping, like Google Maps or Uber, become vehicles for the narratives about gendered security.&lt;/p&gt;
&lt;p&gt;In defining the ‘offline’, the ‘online’ already exists, and the dichotomy is strangely maintained by the use of interactive maps on personal devices. The essay argues for a more nuanced understanding of internalised constructions of safety, and proposes the idea that institutional surveillance has been a way to discipline gendered bodies historically, and that it is continued with the use of technologies. This may be due to state machinery, or even cultural consent, which would then show up the way that features of mobile applications are marketed.&lt;/p&gt;
&lt;h3 id="cole"&gt;&lt;strong&gt;Cole Flor&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;Deactivating: An Escape From the Realities of the Online World&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;A friend posts travels, unboxing the latest gadget, trying out makeup products even before they’re out in the market, and the audience hit ‘like’ but deep inside suddenly feel inadequate about their own lives and ask,
"What am I doing wrong? Why am I not happy like them?"&lt;/p&gt;
&lt;p&gt;The year was 2012 when the earliest of studies on how Social Media contributes to Anxiety went viral.&lt;/p&gt;
&lt;p&gt;Even with the complicated nature of mental illnesses — the taboo of it all that kept people tiptoeing around the topic — the news was able to crack the glossy facade of online spaces. Back then, it was ridiculous to think that online content — the very representation of freedom of expression, information-sharing, open communities — caused users some level of distress that affects their mental state. However, with every story that comes out these days of or relating to mental illnesses and social media, people are no longer in denial that being online has become the world’s default state. With that primary connection comes a full spectrum of emotions and perspectives that shifted how society views the self, their community, and their roles in being a ‘netizen’. The blurring of lines of what’s considered appropriate content, the multiple performances of everyday life, and the imagery that constitutes "happiness", "satisfaction", "significance", "purpose", and "validation" can be described as overwhelming, disconcerting, and stressful to an extent. For borderline Millennials like myself — the generation of Digital Natives — being offline is now an escape from the harsh realities of the online society.&lt;/p&gt;
&lt;p&gt;These studies shed light on new narratives that recognized how curating the perfect and seamless life online not only affects the users viewing the content but even the content producers themselves, cracking under pressure and giving into the expectation of "Keeping the Image Alive", whatever it takes. Online life gave "peer pressure" a new meaning.&lt;/p&gt;
&lt;p&gt;But users can only deal with so much pressure without sacrificing a part of themselves. During the emergence of social media in early 2000s, users felt the need to go online to escape their personal problems and live in another world where everything seemed easy and possible; where anonymity was powerful and so was virtually traveling in a borderless space where a link opens doors for personal, professional, political, and socio-economic transformation. A quick turn of events, users now wish to escape from the clamor of Twitter threads, Instagram stories, Snaps, and political rants and fake news on Facebook. More and more users deactivate and hibernate, get on board a "social media detox" to rid of the "poison" online content and their [e]nvironments has caused them, all in search for a new something to be called "real".&lt;/p&gt;
&lt;p&gt;This narrative essay explores several dimensions why users choose to deactivate, and how that very choice is more of a symptom of a societal anomaly rather than a simple "break" from the chaotic world of social media. It is written in the perspective of a Digital Native - a person who has an inextricable affinity to digital devices but at the same time, is in touch with the analog way of life. The choice of going offline is not only to focus on what used to be real (a life away from the Internet), but it is to gather wits together, stay away from perfectly curated lives to keep sane, and ultimately, to chase life's curiosities and ambitions without having the need to validate achievements with a Like.&lt;/p&gt;
&lt;h3 id="elishia"&gt;&lt;strong&gt;Elishia Vaz&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;Dynamics of the ‘offline’ self-diagnosis, exploration of the corporeal and the politics of information&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;The corpus of information on health and related topics in the online sphere has caused much concern in relation to self-diagnosis. Concepts like cyberchondria have emerged with the medicalisation of behaviour that uses online health information to explore the corporeal disabilities of the body. While literature has largely concentrated on individual susceptibilities to Cyberchondria and corresponding negative and positive results of the behaviour, there is little that explores the politics of information that characterises this trope. The behaviours of self-diagnosis and exploration of the corporeal often challenge the symptomatology of the offline allopathic physician. The physician often deals with an informed patient. Yet, the questions remain. If online information drives such offline corporeal exploration, who is left out? Are behaviours analogous to cyberchondria a privilege when viewed from a lens of digital marginalization? Are only those who have access to and can make sense of the online health discourse afforded simultaneous access to their offline corporeal bodies in ways that the digitally marginalized are not? This article uses semi-structured qualitative in-depth interviews with doctors to explore the dynamics of exploring the offline corporeal in the presence of online health information.&lt;/p&gt;
&lt;h3 id="karandeep"&gt;&lt;strong&gt;Karandeep Mehra&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;The Shadow that Social Media Casts: The Doubled Offlines of Online Sociality&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;In William Gibson’s cyberpunk novel Neuromancer, the protagonist ‘Case’ ‘jacks in’ and ‘jacks out’ of ‘cyberspace’. Yet when ostracized from cyberspace, when there is no more a possibility of jacking in, Case suffers a withdrawal from the ‘SimStim’ – simulated stimulations of cyberspace – and he crumbles in the hollow ache of this
isolation “as the dreams came on in the Japanese night like livewire voodoo, and he'd cry for it, cry in his sleep, and wake alone in the dark, curled in his capsule in some coffin hotel, hands clawed into the bedslab, temper foam bunched between his fingers, trying to reach the console that wasn't there.”&lt;/p&gt;
&lt;p&gt;Neuromancer has already been deemed prophetic by critics and theorists, yet in beginning with Gibson, this paper seeks to throw into relief a problem that has now begun to receive scholarly and academic attention. Namely, the legitimacy of drawing a line between the online and offline, or the virtual and the real. With Case, the real or
the offline only becomes possible within the capacity to access or enter the virtual or online. To think of an offline without this capacity, but after it has become possible, is to confront a detritus, a second offline – a hapless clawing dexterity, with dreams that overrun an articulated, identificatory imagination. Anthropologists like Boellstorff, and media theorists like Yuk Hui, have resolved this problem though they have left unexplained this detritus. Instead they resolve the problem through a tight coupling of the online and offline, and rightly so, dismiss any attempts to think of the real in any way unaffected by the virtual.&lt;/p&gt;
&lt;p&gt;The purpose of this paper, though in agreement with the work of Hui and Boellstorff, and drawing from them, is to restage the problem to incorporate the unexplained detritus. That to understand how our conceptions of the subject must be recast to apprehend the transformations that the internet has wrought, must not resolve the opposition between offline and online. We must, instead, attend to the way the two offlines emerge, and the conceptualization of the threshold that oscillates to constitute them.&lt;/p&gt;
&lt;p&gt;The paper understands these two offlines as emerging in what are called “shitstorms”, or moments of frenzy across social media that incite a whorl of discourse, where the speaking body becomes a medium for the propagation for viral forms. The threshold that constitutes them is the relation of the technical extension that makes this propagation possible. This relation leaves the body in a perpetual state of information entropy – that is as a disordered source of data - which must be ordered to be communicated successfully. This threshold that marks out the phase shift between disorder to order to make possible propagation, makes possible also the shadow of an incommunicable that it casts behind – an incommunicable that when understood through Walter Benjamin’s idea of “the torso of a symbol” can help us recast the subject of a network society, as a subject grounded on this shadow.&lt;/p&gt;
&lt;h3 id="preeti"&gt;&lt;strong&gt;Preeti Mudliar&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;In WiFi Exile: The Offline Subjectivities of Online Women&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;In telecom policy imaginations that seek to bridge India’s digital divides, public WiFi hotspots are a particular favourite to ensure last mile Internet connectivity in rural areas. As infrastructures, WiFi networks are thought to privilege democratic notions of freedom and connectivity by rendering space salient as networked areas that only require users to have a WiFi enabled device to get online. However, the kind of spaces that WiFi networks occupy are not always accessible by women even though they are ostensibly public in nature. Social norms that restrict and confine women’s mobilities to certain sanctioned areas do not allow their Internet and digital literacies to be visible in the same way as men who are more easily recognized as active Internet and technology users.&lt;/p&gt;
&lt;p&gt;The invisibility of women thus struggles to create a presence as desirable subjects of the Internet that WiFi infrastructures should also address. In a community where WiFi networks were hosted in public spaces, women reported hearing about WiFi and seeing men using WiFi, but had never used it themselves even though they were also active users of the Internet. With its inaccessibility, the WiFi infrastructure was a contradictory presence in the community for the women who found themselves confined to using the Internet with spotty prepaid mobile data plans. Their use and experience of the Internet was thus in many ways diminished and limited and they reported experiencing a state of offlineness in contrast to the men in their community who could frequent the WiFi hotspots and avail of high speed Internet leading to more expansive repertoires of use.&lt;/p&gt;
&lt;p&gt;This essay proposes a reflection on how the offline can be relational and constituted by the way infrastructures compose certain user subjectivities even while they exile others from being a part of their networks. It expands on Brian Larkin’s contention that in addition to their technical affordances, infrastructures are also equally semiotic and aesthetic forms that are oriented towards creating and addressing certain subjects. It thus asks, how do public WiFi deployments unwittingly create and constitute, what Bardzell and Bardzell call, as ‘subject positions’ of WiFi Internet users and non-users? How do these subject positions inform subjectivities of felt experience of the WiFi that translate to experiencing the offline even while being online?&lt;/p&gt;
&lt;h3 id="rianka"&gt;&lt;strong&gt;Rianka Roy&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;Information Offline: Labour, Surveillance and Activism in the Indian IT&amp;amp;ITES Industry&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;In India the public availability of the internet in the nineties coincided with the beginning of liberalisation. Online connectivity brought the aura of globalization to this country. The internet was a privilege of the few. The Information Technology sector (along with the IT-enabled service industry) had an elite status. Its employees visited, and immigrated to western countries. In fact, India still remains one of the major suppliers of cheap labour in the global IT sector.&lt;/p&gt;
&lt;p&gt;Over the years the aura of the internet waned. In Digital India the State now projects the internet as a necessity. However, IT&amp;amp;ITES companies still identify the labour of their ‘white collar’ employees as a superior vocation. This vague claim to sophistication strips the digitally-connected workforce of various labour rights. Long hours, working from home, and surveillance on personal social media are normative practices in this industry. 
I conducted a case study on Indian IT&amp;amp;ITES employees for my doctoral research (2013-2018). It showed that protocols of online conduct influence these employees’ offline behaviour. For example, even without digital intervention, employees engage in manual self-surveillance and peer-surveillance to complement the digital surveillance of their organisations. They defend this naturalised practice as employers’ prerogative. Offline attributes like reflective glass walls in the office interior and exterior, reinforce this organisational culture.&lt;/p&gt;
&lt;p&gt;Online connectivity is so deeply entrenched in this industry that even dissent seeks digital representation. Activist groups like the Forum for IT Employees (FITE) and the Union for IT &amp;amp; ITES (UNITES) run online campaigns parallel to their offline activism—adopting a hybrid method of protest. They have not abandoned the networks that ensnare them. Paradoxically they embody the same principle of exclusivity that their employers enforce on them. In their interviews, some activists have condemned militant trade unionism prevalent in other industries. For them, their online access sets them apart, and above their industrial counterparts. The “salaried bourgeoisie” (Zizek, p.12) refuse to align themselves with other labour unions.&lt;/p&gt;
&lt;p&gt;My paper examines the impact of the near-absence of offline parameters in this industry. On the basis of company policies and interviews of IT&amp;amp;ITES employees, it examines if employees can stand up to digital dominance and secure their rights without conventional modes of offline protests.&lt;/p&gt;
&lt;h3 id="simiran"&gt;&lt;strong&gt;Simiran Lalvani&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;The Offline as a Place of Work: Examining Food Discovery and Delivery by Digital Platforms&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;Digital platforms for food discovery and delivery are generally viewed as convenient, efficient, allowing discovery of choices beyond the familiar and as reliable sources of information regarding credibility through ratings, comments and photographs.&lt;/p&gt;
&lt;p&gt;The digital divide after demonetisation became more stark as those with access to the online abandoned the offline service providers for their digital counterparts. The adverse impact of this digital divide on offline, informal goods and service providers like local kirana stores, autorickshaw drivers, hawkers has been highlighted and the paradox of formalising the financial system while informalising labour has been pointed out too. In a similar vein, this essay examines continuities and changes in the practices of food discovery and delivery in the context of new digital platforms. How do practices of offline food discovery and delivery respond to the introduction of digital platforms?&lt;/p&gt;
&lt;p&gt;Recently, the Food Safety and Standards Association of India (FSSAI) found that nearly 40 percent of listings on 10 digital platforms like Swiggy and Zomato were of unlicensed food operators. The FSSAI directed these digital platforms to delist these unlicensed entities and also commented that some of the platforms themselves did not have required licenses.&lt;/p&gt;
&lt;p&gt;This essay therefore turns attention away from the impact of digital platforms on offline, informal food operators and towards the digital platforms themselves and the large swathes of informal labour employed in the offline by such platforms. It focuses on location-based gig work like delivery to highlight the role of these workers in running the online. It does so in order to avoid obfuscating the role of such workers in making the online seem formal, efficient and reliable. Finally, it asks how working for the online in the offline allows a denial of their status as employees and invisibilisation of such work and workers.&lt;/p&gt;
&lt;h3 id="srikanth"&gt;&lt;strong&gt;Srikanth Lakshmanan&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;The Cash Merchant&lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;The paper explores the various reasons for merchants remaining offline and using cash over digital payments, both willingly and without a choice, various factors leading to it, the rationale for their choices, policy responses by the state and industry in furthering promotion of digital payments. Demonetisation not only made everyone including merchants seek alternatives to cash in order to continue the business but also provided a policy window for the digital payments industry to get faster regulatory and policy clearances, and get the government to invest in incentivising digital payments. Despite these, the cash to digital shift has not taken place and the demonetisation trends in increased digital payments across modes reversed after cash was back in the system.&lt;/p&gt;
&lt;p&gt;The paper attempts to document infrastructural, commercial, social issues preventing the adoption and the responses of merchants, industry to various policy prescription/enablement to increase adoption whose outcomes are unclear and have not been evaluated.&lt;/p&gt;
&lt;p&gt;Infrastructural issues include technology, policy, regulatory, industry challenges in expanding the existing infrastructure. The lack of physical, regulatory, legal infrastructure prevents growth and merchants from adopting digital payments. Commercial issues include economics of direct and indirect costs to the merchant incurred in owning, accepting digital payments, commercial considerations of various ecosystem players including banks, payment processors that inhibit adoption. Social issues include awareness, literacy including digital, financial literacy, trust, behaviour shift, convenience, exercising choice towards cash.&lt;/p&gt;
&lt;p&gt;Ever since the demonetisation, there is a heightened activity from industry and various arms of the government has been active in promoting digital payments. Industry, led by banks and the fintech ecosystem, has built a range of mobile-enabled digital payment platforms/products such as wallets, BHIM-UPI, BHIM-Aadhaar, BharatQR to enable asset light merchant acceptance infrastructure, expanded merchant base in addition to catering to the surge in demand of card-accepting PoS machines. The government had undertaken a massive awareness program Digidhan soon after demonetisation and had also set up National Digital Payments Mission to promote, oversee the sustainable growth of digital payments. Various ministries are also adopting digital payments in their functioning. It also aided behavioural shift through cashback, incentivisation schemes, some specifically targeted at merchants, reimbursement of card processing charges for smaller merchants and even has in principle proposed a 20% discount on the GST. It has remained light touch on the regulation by not setting up the regulator even after 18 months of announcing the same.&lt;/p&gt;
&lt;p&gt;The paper will analyse how the efforts of industry and government have been met by the merchant and look at factors which can and cannot be changed with policy interventions and real scope of digital payments in the merchant ecosystem.&lt;/p&gt;
&lt;h3 id="titiksha"&gt;&lt;strong&gt;Titiksha Vashist&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Byung-Chul Han in his celebrated book “In the Swarm” warns us of the dangers of the mob that is increasingly replacing the ‘crowd’ or collective which constituted the mass of politics. He states that no true politics is possible in the digital era, where online communities lack a sense of spirit, a “we” that is now a swarm of individuals. Despite his theoretical brilliance, Han forgets that he cannot talk of the digital, the online without the offline. Politics has occurred, and continues to exist in the offline space, using the internet to spread its wings. It is not the online as-is, which has become the subject of philosophy, politics, art and aesthetics that characterises itself alone, sealed off as a space where events occur, identities formed and movements created. It is in fact, the offline that brings the online into being and gives it a myriad of meaning. While access, privilege, commerce and capital are major themes while discussing internet access, we must not forget that the online is not merely a question of choice or access — but one that is often carefully disabled on purpose to control the offline. In India as well as other parts of the world, the internet has been interrupted for long durations to exercise political control and power, often crippling populations. According to a report by the Software Freedom Law Center (SFLC), an organisation that keeps track of internet shutdowns in the country, India has seen 244 shutdowns since 2012, of which 108 have been enforced in 2018 alone. These have been concentrated in areas such as Jammu and Kashmir and the North-East, and in instances of violence and resistance as well.&lt;/p&gt;
&lt;p&gt;An internet shutdown is the digital equivalent of a curfew, and its application raises questions regarding its cause, uses and political intent. The internet as means, as an enabler of political action is seen as threatening, given the shift in the way people today communicate with one another. Internet bans and shutdowns are not only matters of commerce, but also pose the question of politics to understand when and how power is exercised. An offline created out of a shutdown is different- it is curated on purpose and calls for alternative means by which functionalities of daily life, resistance, capital and media occur. This essay aims to explore how the political image of the “sovereign” also enters the digital space to carefully construct, cut- off and marginalized voices, all in the name of state security, and law and order. According to philosopher Carl Schmitt, the sovereign is he who decides on the exception, and the offline is increasingly becoming a space of exception where those who control the digital can influence the political in real time. In this context,  how do we understand the relationship of power and digital access? This essay focuses on three broad questions: (a) Is there a community online capable of political action that is facilitated by the internet? (b) How does power function in internet shutdowns and are they threats to democratic freedom of expression? And finally, (c) How do we begin to unpack the ‘online’ and the ‘offline’ in such a context?&lt;/p&gt;
&lt;h3 id="yenn"&gt;&lt;strong&gt;Dr. Yenn Lee&lt;/strong&gt;&lt;/h3&gt;
&lt;h4&gt;&lt;em&gt;Online consequences of being offline: A gendered tale from South Korea &lt;/em&gt;&lt;/h4&gt;
&lt;p&gt;We hear numerous anecdotes of people facing the consequences of their online activity when offline. Some have lost jobs, have been disciplined in school, or have wound up in court for what they have posted online. However, in comparison, there has been somewhat limited discussion of the reverse scenario, where going about one's day-to-day life offline leads to violations of one's online self.&lt;/p&gt;
&lt;p&gt;This essay is concerned with a new and unparalleled phenomenon in South Korea, locally termed molka. Literally meaning 'hidden camera', molka refers to the genre of women being filmed in the least expected of situations, including cubicles in public restrooms and in the midst of car accidents, and the footage being traded and consumed as entertainment. This is distinct from revenge porn or cyber-stalking where the perpetrators usually target a known or pre-determined individual with the intention of humiliating them or to exercise control. The subjects of molka are victimised for merely existing offline and are mostly unaware that their privacy has been violated until they are recognised by someone who knows them and informs them (or inflicts further harm). In response to the rising trend of molka, tens of thousands of frustrated and infuriated women have staged monthly protest rallies in central Seoul since May 2018, urging government intervention. Ironically, women gathered offline to protest against molka have been subjected to further molka crimes with unconsented photos of themselves at the rallies surfacing online and many have been the target of misogynous attacks.&lt;/p&gt;
&lt;p&gt;Informed by the author's multi-year ethnographic study of technologically mediated and heightened tensions in contemporary South Korean society, this essay provides a succinct yet contextualised account of the molka phenomenon. With particular attention to the ways in which the phenomenon has developed while shifting between offline and online realms, the essay demonstrates the gendered nature of digital privacy and harassment, and the broader implications of this Korean phenomenon for women in other parts of the world.&lt;/p&gt;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/raw/essays-on-offline-selected-abstracts'&gt;https://cis-india.org/raw/essays-on-offline-selected-abstracts&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>sneha-pp</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Researchers at Work</dc:subject>
    
    
        <dc:subject>Offline</dc:subject>
    
    
        <dc:subject>Internet Studies</dc:subject>
    
    
        <dc:subject>RAW Blog</dc:subject>
    

   <dc:date>2018-09-06T14:14:47Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda">
    <title>AI in India: A Policy Agenda</title>
    <link>https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda"&gt;Click&lt;/a&gt; to download the file&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;h1 style="text-align: justify; "&gt;Background&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;Over the last few months, the Centre for Internet and Society has been engaged in the mapping of use and impact of artificial intelligence in health, banking, manufacturing, and governance sectors in India through the development of a case study compendium.&lt;a href="#_ftn1" name="_ftnref1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Alongside this research, we are examining the impact of Industry 4.0 on jobs and employment and questions related to the future of work in India. We have also been a part of several global conversations on artificial intelligence and autonomous systems. The Centre for Internet and Society is part of the Partnership on Artificial Intelligence, a consortium which has representation from some of most important companies and civil society organisations involved in developments and research on artificial intelligence. We have contributed to the The IEEE Global Initiative on Ethics of Autonomous and Intelligent Systems, and are also a part of a Big Data for Development Global Network, where we are undertaking research towards evolving ethical principles for use of computational techniques. The following are a set of recommendations we have arrived out of our research into artificial intelligence, particularly the sectoral case studies focussed on the development and use of artificial intelligence in India.&lt;/p&gt;
&lt;h1 style="text-align: justify; "&gt;National AI Strategies: A Brief Global Overview&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;Artificial Intelligence is emerging as  a central policy issue  in several countries. In October 2016, the Obama White House released a report titled, “Preparing for the Future of Artificial Intelligence”&lt;a href="#_ftn2" name="_ftnref2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; delving into a range of issues including application for public goods, regulation, economic impact, global security and fairness issues. The White House also released a companion document called the “National Artificial Intelligence Research and Development Strategic Plan”&lt;a href="#_ftn3" name="_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; which laid out a strategic plan for Federally-funded research and development in AI. These were the first of a series of policy documents released by the US towards the role of AI. The United Kingdom announced its 2020 national development strategy and issued a government report to accelerate the application of AI by government agencies while in 2018 the Department for Business, Energy, and Industrial Strategy released the Policy Paper - AI Sector Deal.&lt;a href="#_ftn4" name="_ftnref4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The Japanese government released it paper on Artificial Intelligence Technology Strategy in 2017.&lt;a href="#_ftn5" name="_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The European Union launched "SPARC," the world’s largest civilian robotics R&amp;amp;D program, back in 2014.&lt;a href="#_ftn6" name="_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Over the last year and a half, Canada,&lt;a href="#_ftn7" name="_ftnref7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; China,&lt;a href="#_ftn8" name="_ftnref8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the UAE,&lt;a href="#_ftn9" name="_ftnref9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Singapore,&lt;a href="#_ftn10" name="_ftnref10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; South Korea&lt;a href="#_ftn11" name="_ftnref11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, and France&lt;a href="#_ftn12" name="_ftnref12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; have announced national AI strategy documents while 24 member States in the EU have committed to develop national AI policies that reflect a “European” approach to AI &lt;a href="#_ftn13" name="_ftnref13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;. Other countries such as Mexico and Malaysia are in the process of evolving their national AI strategies. What this suggests is that AI is quickly emerging as central to national plans around the development of science and technology as well as economic and national security and development. There is also a focus on investments enabling AI innovation in critical national domains as a means of addressing key challenges facing nations. 
India has followed this trend and in 2018 the government published two AI roadmaps - the Report of Task Force on Artificial Intelligence by the AI Task Force constituted by the Ministry of Commerce and Industry&lt;a href="#_ftn14" name="_ftnref14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and the National Strategy for Artificial Intelligence by Niti Aayog.&lt;a href="#_ftn15" name="_ftnref15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Some of the key themes running across the National AI strategies globally are spelt out below.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Economic Impact of AI&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;A common thread that runs across the different national approaches to AI is the belief in the significant economic impact of AI, that it will likely increase productivity and create wealth. The British government estimated that AI could add $814 billion to the UK economy by 2035. The UAE report states that by 2031, AI will help boost the country’s GDP by 35 per cent, reduce government costs by 50 per cent. Similarly, China estimates that the core AI market will be worth 150 billion RMB ($25bn) by 2020, 400 billion RMB ($65bn) and one trillion RMB ($160bn) by 2030. The impact of adoption of AI and automation of labour and employment is also a key theme touched upon across the strategies. For instance, the White House Report of October 2016 states the US workforce is unprepared – and that a serious education programme, through online courses and in-house schemes, will be required.&lt;a href="#_ftn16" name="_ftnref16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;State Funding&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Another key trend exhibited in all national strategies towards AI has been a commitment by the respective governments towards supporting research and development in AI. The French government has stated that it intends to invest €1.5 billion ($1.85 billion) in AI research in the period through to 2022. The British government’s recommendations, in late 2017, were followed swiftly by a promise in the autumn budget of new funds, including at least £75 million for AI. Similarly, the the Canadian government put together a $125-million ‘pan-Canadian AI strategy’ last year.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;AI for Public Good&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;The use of AI for Public Good is a significant focus of most AI policies. The biggest justification for AI innovation as a legitimate objective of public policy is its promised impact towards improvement of  people’s lives by helping to solve some of the world’s greatest challenges and inefficiencies, and emerge as a transformative technology, much like mobile computing. These public good uses of AI are emerging across sectors such as transportation, migration, law enforcement and justice system, education, and agriculture..&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;National Institutions leading AI research&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Another important trend which was  key to the implementation of national AI strategies is the creation or development of well-funded centres of excellence which would serve as drivers of research and development and leverage synergies with the private sector. The French Institute for Research in Computer Science and Automation (INRIA) plans to create a national AI research program with five industrial partners. In UK, The Alan Turing Institute is likely to emerge as the national institute for data science, and an AI Council would be set up to manage inter-sector initiatives and training. In Canada, Canadian Institute for Advanced Research (CIFAR) has been tasked with implementing their AI strategy. Countries like Japan has a less centralised structure with the creation of strategic council for AI technology’ to promote research and development in the field, and manage a number of key academic institutions, including NEDO and its national ICT (NICT) and science and tech (JST) agencies. These institutions are key to successful implementation of national agendas and policies around AI.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;AI, Ethics and Regulation&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Across the AI strategies — ethical dimensions and regulation of AI were highlighted as concerns that needed to be addressed. Algorithmic transparency and explainability, clarity on liability, accountability and oversight, bias and discrimination, and privacy are ethical  and regulatory questions that have been raised. Employment and the future of work is another area of focus that has been identified by countries.  For example, the US 2016 Report reflected on if existing regulation is adequate to address risk or if adaption is needed by examining the use of AI in automated vehicles. In the policy paper - AI Sector Deal - the UK proposes four grand challenges: AI and Data Economy, Future Mobility, Clean Growth, and Ageing Society. The Pan Canadian Artificial Intelligence Strategy focuses on developing global thought leadership on the economic, ethical, policy, and legal implications of advances in artificial intelligence.&lt;a href="#_ftn17" name="_ftnref17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The above are important factors and trends to take into account and to different extents have been reflected in the two national roadmaps for AI. Without adequate institutional planning, there is a risk of national strategies being too monolithic in nature.  Without sufficient supporting mechanisms in the form of national institutions which would drive the AI research and innovation, capacity building and re-skilling of workforce to adapt to changing technological trends, building regulatory capacity to address new and emerging issues which may disrupt traditional forms of regulation and finally, creation of an environment of monetary support both from the public and private sector it becomes difficult to implement a national strategy and actualize the potentials of AI . As stated above, there is also a need for identification of key national policy problems which can be addressed by the use of AI, and the creation of a framework with institutional actors to articulate the appropriate plan of action to address the problems using AI. There are several ongoing global initiatives which are in the process of trying to articulate key principles for ethical AI. These discussions also feature in some of the national strategy documents.&lt;/p&gt;
&lt;h1 style="text-align: justify; "&gt;Key considerations for AI policymaking in India&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;As mentioned above, India has published two national AI strategies. We have responded to both of these here&lt;a href="#_ftn18" name="_ftnref18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and here.&lt;a href="#_ftn19" name="_ftnref19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Beyond these two roadmaps, this policy brief reflects on a number of factors that need to come together for India to leverage and adopt AI across sectors, communities, and technologies successfully.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Resources, Infrastructure, Markets, and Funding&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Ensure adequate government funding and investment in R&amp;amp;D&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;As mentioned above, a survey of all major national strategies on AI reveals a significant financial commitment from governments towards research and development surrounding AI. Most strategy documents speak of the need to safeguard national ambitions in the race for AI development. In order to do so it is imperative to have a national strategy for AI research and development, identification of nodal agencies to enable the process, and creation of institutional capacity to carry out cutting edge research.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most jurisdictions such as Japan, UK and China have discussed collaborations between the industry and government to ensure greater investment into AI research and development. The European Union has spoken using the existing public-private partnerships, particularly in robotics and big data to boost investment by over one and half times.&lt;a href="#_ftn20" name="_ftnref20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; To some extent, this  step has been initiated by the Niti Aayog strategy paper. The paper lists out enabling factors for the widespread adoption of AI and maps out specific government agencies and ministries that could promote such growth. In February 2018, the Ministry of Electronics and IT also set up four committees to prepare a roadmap for a national AI programme. The four committees are presently studying AI in context of citizen centric services; data platforms; skilling, reskilling and R&amp;amp;D; and legal, regulatory and cybersecurity perspectives.&lt;a href="#_ftn21" name="_ftnref21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Democratize AI technologies and data&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Clean, accurate, and appropriately curated data is essential for training algorithms. Importantly, large quantities of data alone does not translate into better results. Accuracy and curation of data should be prerequisites to quantity of data. Frameworks to generate and access larger quantity of data should not hinge on models of centralized data stores. The government and the private sector are generally gatekeepers to vast amounts of data and technologies. Ryan Calo has called this an issue of data parity,&lt;a href="#_ftn22" name="_ftnref22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; where only a few well established leaders in the field have the ability to acquire data and build datasets. Gaining access to data comes with its own questions of ownership, privacy, security, accuracy, and completeness. There are a number of different approaches and techniques that can be adopted to enable access to data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Open Government Data &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Robust open data sets is one way in which access can be enabled. Open data is particularly important for small start-ups as they build prototypes. Even though India is a data dense country and has in place a National Data and Accessibility Policy India does not yet have robust and comprehensive open data sets across sectors and fields.  Our research found that this is standing as an obstacle to innovation in the Indian context as startups often turn to open datasets in the US and Europe for developing prototypes. Yet, this is problematic because the demography represented in the data set is significantly different resulting in the development of solutions that are trained to a specific demographic, and thus need to be re-trained on Indian data. Although AI is technology agnostic, in the cases of different use cases of data analysis, demographically different training data is not ideal. This is particularly true for certain categories such as health, employment, and financial data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The government can play a key role in providing access to datasets that will help the functioning and performance of AI technologies. The Indian government has already made a move towards accessible datasets through the Open Government Data Platform which provides access to a range of data collected by various ministries. Telangana has developed its own Open Data Policy which has stood out for its transparency and the quality of data collected and helps build AI based solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In order to encourage and facilitate innovation, the central and state governments need to actively pursue and implement the National Data and Accessibility Policy.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Access to Private Sector Data &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The private sector is the gatekeeper to large amounts of data. There is a need to explore different models of enabling access to private sector data while ensuring and protecting users rights and company IP. This data is often considered as a company asset and not shared with other stakeholders. Yet, this data is essential in enabling innovation in AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amanda Levendowski states that ML practitioners have essentially three options in securing sufficient data— build the databases themselves, buy the data, or use data in the public domain. The first two alternatives are largely available to big firms or institutions. Smaller firms often end resorting to the third option but it carries greater risks of bias.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A solution could be federated access, with companies allowing access to researchers and developers to encrypted data without sharing the actual data.  Another solution that has been proposed is ‘watermarking’ data sets.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Data sandboxes have been promoted as tools for enabling innovation while protecting privacy, security etc. Data sandboxes allow companies access to large anonymized data sets under controlled circumstances. A regulatory sandbox is a controlled environment with relaxed regulations that allow the product to be tested thoroughly before it is launched to the public. By providing certification and safe spaces for testing, the government will encourage innovation in this sphere. This system has already been adopted in Japan where there are AI specific regulatory sandboxes to drive society 5.0.160 data sandboxes are tools that can be considered within specific sectors to enable innovation. A sector wide data sandbox was also contemplated by TRAI.&lt;a href="#_ftn23" name="_ftnref23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; A sector specific governance structure can establish a system of ethical reviews of underlying data used to feed the AI technology along with data collected in order to ensure that this data is complete, accurate and has integrity. A similar system has been developed by Statistics Norway and the Norwegian Centre for Research Data.&lt;a href="#_ftn24" name="_ftnref24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Marketplaces&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The National Roadmap for Artificial Intelligence by NITI Aayog proposes the creation of a National AI marketplace that is comprised of a data marketplace, data annotation marketplace, and deployable model marketplace/solutions marketplace.&lt;a href="#_ftn25" name="_ftnref25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; In particular, it is envisioned that the data marketplace would be based on blockchain technology and have the features of: traceability, access controls, compliance with local and international regulations, and robust price discovery mechanism for data. Other questions that will need to be answered center around pricing and ensuring equal access. It will also be interesting how the government incentivises the provision of data by private sector companies. Most data marketplaces that are emerging are initiated by the private sector.&lt;a href="#_ftn26" name="_ftnref26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; A government initiated marketplace has the potential to bring parity to some of the questions raised above, but it should be strictly limited to private sector data in order to not replace open government data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Open Source Technology &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;A number of companies are now offering open source AI technologies. For example, TensorFlow, Keras, Scikit-learn, Microsoft Cognitive Toolkit, Theano, Caffe, Torch, and Accord.NET.&lt;a href="#_ftn27" name="_ftnref27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The government should incentivise and promote open source AI technologies towards harnessing and accelerating research in AI.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Re-thinking Intellectual Property Regimes &lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Going forward it will be important for the government to develop an intellectual property framework that encourages innovation. AI systems are trained by reading, viewing, and listening to copies of human-created works. These resources such as books, articles, photographs, films, videos, and audio recordings are all key subjects of copyright protection. Copyright law grants exclusive rights to copyright owners, including the right to reproduce their works in copies, and one who violates one of those exclusive rights “is an infringer of copyright.&lt;a href="#_ftn28" name="_ftnref28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The enterprise of AI is, to this extent, designed to conflict with tenets of copyright law, and after the attempted ‘democratization’ of copyrighted content by the advent of the Internet, AI poses the latest challenge to copyright law. At the centre of this challenge is the fact that it remains an open question whether a copy made to train AI is a “copy” under copyright law, and consequently whether such a copy is an infringement.&lt;a href="#_ftn29" name="_ftnref29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The fractured jurisprudence on copyright law is likely to pose interesting legal questions with newer use cases of AI. For instance, Google has developed a technique called federated learning, popularly referred to as on-device ML, in which training data is localised to the originating mobile device rather than copying data to a centralized server.&lt;a href="#_ftn30" name="_ftnref30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The key copyright questions here is whether decentralized training data stored in random access memory (RAM) would be considered as “copies”.&lt;a href="#_ftn31" name="_ftnref31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; There are also suggestions that copies made for the purpose of training of machine learning systems may be so trivial or de minimis that they may not qualify as infringement.&lt;a href="#_ftn32" name="_ftnref32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; For any industry to flourish, there needs to be legal and regulatory clarity and it is imperative that these copyright questions emerging out of use of AI be addressed soon.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As noted in our response to the Niti Aayog national AI strategy  “&lt;i&gt;The report also blames the current Indian  Intellectual Property regime for being “unattractive” and averse to incentivising research and adoption of AI. Section 3(k) of Patents Act exempts algorithms from being patented, and the Computer Related Inventions (CRI) Guidelines have faced much controversy over the patentability of mere software without a novel hardware component. The paper provides no concrete answers to the question of whether it should be permissible to patent algorithms, and if yes, to  to what extent. Furthermore, there needs to be a standard either in the CRI Guidelines or the Patent Act, that distinguishes between AI algorithms and non-AI algorithms. Additionally, given that there is no historical precedence on the requirement of patent rights to incentivise creation of AI,  innovative investment protection mechanisms that have lesser negative externalities, such as compensatory liability regimes would be more desirable.  The report further failed to look at the issue holistically and recognize that facilitating rampant patenting can form a barrier to smaller companies from using or developing  AI. This is important to be cognizant of given the central role of startups to the AI ecosystem in India and because it can work against the larger goal of inclusion articulated by the report.”&lt;a href="#_ftn33" name="_ftnref33"&gt;&lt;sup&gt;&lt;b&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/b&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/i&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;National infrastructure to support domestic development &lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Building a robust national Artificial Intelligence solution requires establishing adequate indigenous  infrastructural capacity for data storage and processing.  While this should not necessarily extend to mandating data localisation as the draft privacy bill has done, capacity should be developed to store data sets generated by indigenous nodal points.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Data Storage &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Capacity needs to increase as the volume of data that needs to be processed in India increases. This includes ensuring effective storage capacity, IOPS (Input/Output per second) and ability to process massive amounts of data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Networking Infrastructure&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Organizations will need to upgrade their networks in a bid to upgrade and optimize efficiencies of scale. Scalability must be undertaken on a high priority which will require a high-bandwidth, low latency and creative architecture, which requires appropriate last mile data curation enforcement.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Conceptualization and Implementation&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Awareness, Education, and Reskilling &lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Encouraging AI research&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;This can be achieved by collaborations between the government and large companies to promote accessibility and encourage innovation through greater R&amp;amp;D spending. The Government of Karnataka, for instance, is collaborating with NASSCOM to set up a Centre of Excellence for Data Science and Artificial Intelligence (CoE-DS&amp;amp;AI) on a public-private partnership model to “accelerate the ecosystem in Karnataka by providing the impetus for the development of data science and artificial intelligence across the country.” Similar centres could be incubated in hospitals and medical colleges in India.  Principles of public funded research such as FOSS, open standards, and open data should be core to government initiatives to encourage research.  The Niti Aaayog report proposes a two tier integrated approach towards accelerating research, but is currently silent on these principles.&lt;a href="#_ftn34" name="_ftnref34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Therefore,as suggested by the NITI AAYOG Report, the government needs to set up ‘centres of excellence’. Building upon the stakeholders identified in the NITI AAYOG Report, the centers of excellence should  involve a wide range of experts including lawyers, political philosophers, software developers, sociologists and gender studies from diverse organizations including government, civil society,the private sector and research institutions  to ensure the fair and efficient roll out of the technology.&lt;a href="#_ftn35" name="_ftnref35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; An example is the Leverhulme Centre for the Future of Intelligence set up by the Leverhulme Foundation at the University of Cambridge&lt;a href="#_ftn36" name="_ftnref36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and the AI Now Institute at New York University (NYU)&lt;a href="#_ftn37" name="_ftnref37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; These research centres bring together a wide range of experts from all over the globe.&lt;a href="#_ftn38" name="_ftnref38"&gt;&lt;sup&gt;&lt;sup&gt;[38]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Skill sets to successfully adopt AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Educational institutions should provide opportunities for students to skill themselves to adapt to adoption of AI, and also push for academic programmes around AI. It is also important to introduce computing technologies such as AI in medical schools in order to equip doctors to adopt the technical skill sets and ethics required to use integrate AI in their practices. Similarly, IT institutes could include courses on ethics, privacy, accountability etc. to equip engineers and developers with an understanding of the questions surrounding the technology and services they are developing.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Societal Awareness Building&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Much of the discussion around skilling for AI is in the context of the workplace, but there is a need for awareness to be developed across society for a broader adaptation to AI. The Niti Aayog report takes the first steps towards this - noting the importance of highlighting the benefits of AI to the public. The conversation needs to go beyond this towards enabling individuals to recognize and adapt to changes that might be brought about - directly and indirectly - by AI - inside and outside of the workplace. This could include catalyzing a shift in mindset to life long learning and discussion around potential implications of human-machine interactions.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Early Childhood Awareness and Education &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;It is important that awareness around AI begins in early childhood. This is  in part because children already interact with AI and increasingly will do so and thus awareness is needed in how AI works and can be safely and ethically used. It is also important to start building the skills that will be necessary in an AI driven society from a young age.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Focus on marginalised groups &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Awareness, skills, and education should be targeted at national minorities including rural communities, the disabled, and women. Further, there should be a concerted  focus on communities that are under-represented in the tech sector-such as women and sexual minorities-to ensure that the algorithms themselves and the community working on AI driven solutions are holistic and cohesive. For example, Iridescent focuses on girls, children, and families to enable them to adapt to changes like artificial intelligence through promoting curiosity, creativity, and perseverance to become lifelong learners.&lt;a href="#_ftn39" name="_ftnref39"&gt;&lt;sup&gt;&lt;sup&gt;[39]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; This will be important towards ensuring that AI does not deepen societal  and global inequalities including digital divides. Widespread use of AI will undoubtedly require re-skilling various stakeholders in order to make them aware of the prospects of AI.&lt;a href="#_ftn40" name="_ftnref40"&gt;&lt;sup&gt;&lt;sup&gt;[40]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Artificial Intelligence itself can be used as a resource in the re-skilling process itself-as it would be used in the education sector to gauge people’s comfort with the technology and plug necessary gaps.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Improved access to and awareness of Internet of Things&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The development of smart content or Intelligent Tutoring Systems in the education can only be done on a large scale if both the teacher and the student has access to and feel comfortable with using basic IoT devices . A U.K. government report has suggested that any skilled workforce  using AI should be a mix of those with a basic understanding responsible for implementation at the grassroots level , more informed users and specialists with advanced development and implementation skills.&lt;a href="#_ftn41" name="_ftnref41"&gt;&lt;sup&gt;&lt;sup&gt;[41]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;The same logic applies to the agriculture sector, where the government is looking to develop smart weather-pattern tracking applications. A potential short-term solution may lie in ensuring that key actors have access to an  IoT device so that he/she may access digital and then impart the benefits of access to proximate individuals. In the education sector, this would involve ensuring that all teachers have access to and are competent in using an IoT device. In the agricultural sector, this may involve equipping each village with a set of IoT devices so that the information can be shared among concerned individuals. Such an approach recognizes that AI is not the only technology catalyzing change - for example industry 4.0 is understood as  comprising of a suite of technologies including but not limited to AI.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Public Discourse&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;As solutions bring together and process vast amounts of granular data, this data can be from a variety of public and private sources - from third party sources or generated by the AI and its interaction with its environment. This means that very granular and non traditional data points are now going into decision making processes. Public discussion is needed to understand social and cultural norms and standards and how these might translate into acceptable use norms for data in various sectors.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Coordination and collaboration across stakeholders &lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Development of Contextually Nuanced and Appropriate AI Solutions &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Towards ensuring effectiveness and  accuracy it is important that solutions used in India are developed to account for cultural nuances and diversity. From our research this could be done in a number of ways ranging from: training AI solutions used in health on data from Indian patients to account for differences in demographics&lt;a href="#_ftn42" name="_ftnref42"&gt;&lt;sup&gt;&lt;sup&gt;[42]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;,  focussing on  natural language voice recognition to account for the diversity in languages and digital skills in the Indian context,&lt;a href="#_ftn43" name="_ftnref43"&gt;&lt;sup&gt;&lt;sup&gt;[43]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and developing and applying AI to reflect societal norms and understandings.&lt;a href="#_ftn44" name="_ftnref44"&gt;&lt;sup&gt;&lt;sup&gt;[44]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Continuing, deepening, and expanding  partnerships for innovation&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Continued innovation while holistically accounting for the challenges that AI poses  will be key for actors in the different sectors to remain competitive. As noted across case study reports partnerships is key in  facilitating this innovation and filling capacity gaps. These partnerships can be across sectors, institutions, domains, geographies, and stakeholder groups. For example:  finance/ telecom, public/private, national/international, ethics/software development/law, and academia/civil society/industry/government.  We would emphasize collaboration between actors across different domains and stakeholder groups as developing holistics AI solutions demands multiple understandings and perspectives.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Coordinated Implementation&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Key sectors in India need to  begin to take steps to consider sector wide coordination in implementing AI. Potential stress and system wide vulnerabilities would need to be considered when undertaking this. Sectoral regulators such as RBI, TRAI, and the Medical Council of India are ideally placed to lead this coordination.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Develop contextual standard benchmarks to assess quality of algorithms&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;In part because of the nacency of the development and implementation of AI,  towards enabling effective assessments of algorithms to understand impact and informing selection by institutions adopting solutions, standard benchmarks can help in assessing quality and appropriateness of algorithms. It may be most effective to define such benchmarks at a sectoral level (finance etc.) or by technology and solution (facial recognition etc.).  Ideally, these efforts would be led by the government in collaboration with multiple stakeholders.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Developing a framework for working with the private sector for use-cases by the government&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;There are various potential use cases the government could adopt in order to use AI as a tool for augmenting public service delivery  in India by the government. However, given lack of capacity -both human resource and technological-means that entering into partnerships with the private sector may enable more fruitful harnessing of AI- as has been seen with existing MOUs in the agricultural&lt;a href="#_ftn45" name="_ftnref45"&gt;&lt;sup&gt;&lt;sup&gt;[45]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and healthcare sectors.&lt;a href="#_ftn46" name="_ftnref46"&gt;&lt;sup&gt;&lt;sup&gt;[46]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; However, the partnership must be used as a means to build capacity within the various nodes in the set-up rather than relying  only on  the private sector partner to continue delivering sustainable solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Particularly, in the case of use of AI for governance, there is a need to evolve a clear parameter to do impact assessment prior to the deployment of the technology that clearly tries to map estimated impact of the technology of clearly defined objectives, which must also include the due process, procedural fairness and human rights considerations . As per Article 12 of the Indian Constitution, whenever the government is exercising a public function, it is bound by the entire gamut of fundamental rights articulated in Part III of the Constitution. This is a crucial consideration the government will have to bear in mind whenever it uses AI-regardless of the sector.  In all cases of public service delivery, primary accountability for the use of AI should lie with the government itself, which means that a cohesive and uniform framework which regulates these partnerships must be conceptualised. This framework should incorporate : (a) Uniformity in the wording and content of contracts that the government signs, (b) Imposition of obligations of transparency and accountability on the developer to ensure that the solutions developed are in conjunction with constitutional standards and (c) Continuous evaluation of private sector developers by the government and experts to ensure that they are complying with their obligations.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Defining Safety Critical AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The implications of AI differs according to use. Some countries, such as the EU, are beginning to define sectors where AI should play the role of augmenting jobs as opposed to functioning autonomously. The Global Partnership on AI is has termed sectors where AI tools supplement or replace human decision making in areas such as health and transportation as ‘safety critical AI’ and is  researching best practices for application of AI in these areas.  India will need to think through if there is a threshold that needs to be set and more stringent regulation applied. In addition to uses in health and transportation, defense and law enforcement would be another sector where certain use would require more stringent regulation.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Appropriate certification mechanisms&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Appropriate certificate mechanisms will be important in ensuring the quality of AI solutions.   A significant barrier to the adoption of AI  in some sectors  in India is acceptability of results, which include direct results arrived at using AI technologies as well as opinions provided by practitioners that are influenced/aided by AI technologies. For instance, start-ups in the healthcare sectors often find that they are asked to show proof of a clinical trial when presenting their products to doctors and hospitals, yet clinical trials are expensive, time consuming and inappropriate forms of certification for medical devices and digital health platforms. Startups also face difficulty in conducting clinical trials as there is lack of a clear regulation to adhere to. They believe that while clinical trials are a necessity with respect to drugs, the process often results in obsolescence of the technology by the time it is approved in the context of AI. Yet, medical practitioners are less trusting towards startups who do not have approval from a national or international authority. A possible and partial solution suggested by these startups is to enable doctors to partner with them to conduct clinical trials together. However, such partnerships cannot be at the expense of rigour, and adequate protections need to be built in the enabling regulation.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Serving as a voice for emerging economies in the global debate on AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;While India should utilise Artificial Intelligence in the economy as a means of occupying a driving role in the global debate around AI, it must be cautious before allowing the use of Indian territory and infrastructure as a test bed for other emerging economies without considering the ramifications that the utilisation of AI may have for Indian citizens. The NITI AAYOG Report envisions  India as leverage AI as a ‘garage’ for emerging economies.&lt;a href="#_ftn47" name="_ftnref47"&gt;&lt;sup&gt;&lt;sup&gt;[47]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While there are certain positive connotations of this suggestion in so far as this propels India to occupy a leadership position-both technically and normatively in determining future use cases for AI in India,, in order to ensure that Indian citizens are not used as test subjects in this process, guiding principles could be developed such as requiring that projects have clear benefits for India.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Frameworks for Regulation&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;National legislation&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Data Protection Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;India is a data-dense country, and the lack of a robust privacy  regime, allows the public and private sector easier access to large amounts of data than might be found in other contexts with stringent privacy laws. India also lacks a formal regulatory regime around anonymization. In our research we found that this gap does not always translate into a gap in practice, as some start up companies have  adopted  self-regulatory practices towards protecting privacy such as of anonymising data they receive before using it further, but it does result in unclear and unharmonized practice..&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In order to ensure rights and address emerging challenges to the same posed by artificial intelligence, India needs to enact   a comprehensive privacy legislation applicable to the private and public sector to regulate the use of data, including use in artificial intelligence. A privacy legislation will also have to address more complicated questions such as the use of publicly available data for training algorithms, how traditional data categories (PI vs. SPDI - meta data vs. content data etc.) need to be revisited in light of AI,  and how can a privacy legislation be applied to autonomous decision making. Similarly, surveillance laws may need to be revisited in light of AI driven technologies such as facial recognition, UAS, and self driving cars as they provide new means of surveillance to the state and have potential implications for other rights such as the right to freedom of expression and the right to assembly.  Sectoral protections can compliment and build upon the baseline protections articulated in a national privacy legislation.&lt;a href="#_ftn48" name="_ftnref48"&gt;&lt;sup&gt;&lt;sup&gt;[48]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; In August 2018 the Srikrishna Committee released a draft data protection bill for India. We have reflected on how the Bill addresses AI. 
Though the Bill brings under its scope companies deploying emerging technologies and subjects them to the principles of privacy by design and data impact assessments, the Bill is silent on key rights and responsibilities, namely the responsibility of the data controller to explain the logic and impact of automated decision making including profiling to data subjects and the right to opt out of automated decision making in defined circumstances.&lt;a href="#_ftn49" name="_ftnref49"&gt;&lt;sup&gt;&lt;sup&gt;[49]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Further, the development of technological solutions to address the dilemma between AI and the need for access to larger quantities of data for multiple purposes and privacy should be emphasized.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Discrimination Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;A growing area of research globally is the social consequences of AI with a particular focus on its tendency to replicate or amplify existing and structural inequalities. Problems such as data invisibility of certain excluded groups,&lt;a href="#_ftn50" name="_ftnref50"&gt;&lt;sup&gt;&lt;sup&gt;[50]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the myth of data objectivity and neutrality,&lt;a href="#_ftn51" name="_ftnref51"&gt;&lt;sup&gt;&lt;sup&gt;[51]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and data monopolization&lt;a href="#_ftn52" name="_ftnref52"&gt;&lt;sup&gt;&lt;sup&gt;[52]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; contribute to the disparate impacts of big data and AI. So far much of the research on this subject has not moved beyond the exploratory phase as is reflected in the reports released by the White House&lt;a href="#_ftn53" name="_ftnref53"&gt;&lt;sup&gt;&lt;sup&gt;[53]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and Federal Trade Commission&lt;a href="#_ftn54" name="_ftnref54"&gt;&lt;sup&gt;&lt;sup&gt;[54]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; in the United States. The biggest challenge in addressing discriminatory and disparate impacts of AI is ascertaining “where value-added personalization and segmentation ends and where harmful discrimination begins.”&lt;a href="#_ftn55" name="_ftnref55"&gt;&lt;sup&gt;&lt;sup&gt;[55]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Some prominent cases where AI can have discriminatory impact are denial of loans based on attributes such as neighbourhood of residence as a proxies which can be used to circumvent anti-discrimination laws which prevent adverse determination on the grounds of race, religion, caste or gender, or adverse findings by predictive policing against persons who are unfavorably represented in the structurally biased datasets used by the law enforcement agencies. There is a dire need for disparate impact regulation in sectors which see the emerging use of AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Similar to disparate impact regulation, developments in AI, and its utilisation, especially in credit rating, or risk assessment processes could create complex problems that cannot be solved only by the principle based regulation. Instead, regulation intended specifically to avoid outcomes that the regulators feel are completely against the consumer, could be an additional tool that increases the fairness, and effectiveness of the system.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Competition Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The conversation of use of competition or antitrust laws to govern AI is still at an early stage. However, the emergence of numerous data driven mergers or acquisitions such as Yahoo-Verizon, Microsoft-LinkedIn and Facebook-WhatsApp have made it difficult to ignore the potential role of competition law in the governance of data collection and processing practices. It is important to note that the impact of Big Data goes far beyond digital markets and the mergers of companies such as Bayer, Climate Corp and Monsanto shows that data driven business models can also lead to the convergence of companies from completely different sectors as well. So far, courts in Europe have looked at questions such as the impact of combination of databases on competition&lt;a href="#_ftn56" name="_ftnref56"&gt;&lt;sup&gt;&lt;sup&gt;[56]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and have held that in the context of merger control, data can be a relevant question if an undertaking achieves a dominant position through a merger, making it capable of gaining further market power through increased amounts of customer data. The evaluation of the market advantages of specific datasets has already been done in the past, and factors which have been deemed to be relevant have included whether the dataset could be replicated under reasonable conditions by competitors and whether the use of the dataset was likely to result in a significant competitive advantage.&lt;a href="#_ftn57" name="_ftnref57"&gt;&lt;sup&gt;&lt;sup&gt;[57]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; However, there are limited circumstances in which big data meets the four traditional criteria for being a barrier to entry or a source of sustainable competitive advantage — inimitability, rarity, value, and non-substitutability.&lt;a href="#_ftn58" name="_ftnref58"&gt;&lt;sup&gt;&lt;sup&gt;[58]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Any use of competition law to curb data-exclusionary or data-exploitative practices will first have to meet the threshold of establishing capacity for a firm to derive market power from its ability to sustain datasets unavailable to its competitors. In this context the peculiar ways in which network effects, multi-homing practices and how dynamic the digital markets are, are all relevant factors which could have both positive and negative impacts on competition. There is a need for greater discussion on data as a sources of market power in both digital and non-digital markets, and how this legal position can used to curb data monopolies, especially in light of government backed monopolies for identity verification and payments in India.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Consumer Protection Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The Consumer Protection Bill, 2015, tabled in the Parliament towards the end of the monsoon session has introduced an expansive definition of the term “unfair trade practices.” The definition as per the Bill includes the disclosure “to any other person any personal information given in confidence by the consumer.” This clause excludes from the scope of unfair trade practices, disclosures under provisions of any law in force or in public interest. This provision could have significant impact on the personal data protection law in India. Alongside, there is also a need to ensure that principles such as safeguarding consumers personal information in order to ensure that the same is not used to their detriment are included within the definition of unfair trade practices. This would provide consumers an efficient and relatively speedy forum to contest adverse impacts on them of data driven decision-making.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Sectoral Regulation &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Our research into sectoral case studies revealed that there are a number of existing sectoral laws and policies that are applicable to aspects of AI. For example, in the health sector there is the Medical Council Professional Conduct, Etiquette, and Ethics Regulations 2002, the Electronic Health Records Standards 2016, the draft Medical Devices Rules 2017, the draft Digital Information Security in Healthcare Act.  In the finance sector there is the Credit Information Companies (Regulation) Act 2005 and 2006, the Securities and Exchange Board of India (Investment Advisers) Regulations, 2013, the Payment and Settlement Systems Act, 2007, the Banking Regulations Act 1949, SEBI guidelines on robo advisors etc. Before new regulations, guidelines etc are developed - a comprehensive exercise needs to be undertaken at a sectoral level to understand if 1. sectoral policy adequately addresses the changes being brought about by AI 2. If it does not - is an amendment possible and if not - what form of policy would fill the gap.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Principled approach&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Transparency&lt;/b&gt;&lt;/h4&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Audits&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Internal and external audits can be mechanisms towards creating transparency about the processes and results of AI solutions as they are implemented in a specific context. Audits can take place while a solution is still in ‘pilot’ mode and on a regular basis during implementation. For example,  in the Payment Card Industry (PCI) tool,  transparency is achieved through frequent audits, the results of which are simultaneously and instantly transmitted to the regulator and the developer. Ideally parts of the results of the audit are also made available to the public, even if the entire results are not shared.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Tiered Levels of Transparency&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;There are different levels and forms of transparency as well as different ways of achieving the same. The type and form of transparency can be tiered and dependent on factors such as criticality of function, potential direct and indirect harm, sensitivity of data involved, actor using the solution . The audience can also be tiered and could range from an individual user to senior level positions, to oversight bodies.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Human Facing Transparency&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;It will be important for India to define standards around human-machine interaction including the level of transparency that will be required. Will chatbots need to disclose that they are chatbots? Will a notice need to be posted that facial recognition technology is used in a CCTV camera? Will a company need to disclose in terms of service and privacy policies that data is processed via an AI driven solution? Will there be a distinction if the AI takes the decision autonomously vs. if the AI played an augmenting role? Presently, the Niti Aayog paper has been silent on this question.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Explainability&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;An explanation is not equivalent to complete  transparency. The obligation of providing an explanation does not mean  that the developer should necessarily  know the flow of bits through the AI system. Instead, the legal requirement of providing an explanation requires an ability to explain how certain parameters may be utilised to arrive at an outcome in a certain situation.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Doshi-Velez and Kortz have highlighted two technical ideas that may enhance a developer's ability to explain the functioning of AI systems:&lt;a href="#_ftn59" name="_ftnref59"&gt;&lt;sup&gt;&lt;sup&gt;[59]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;1) Differentiation and processing: AI systems are designed to have the inputs differentiated and processed through various forms of computation-in a reproducible and robust manner. Therefore, developers should be able to explain a particular decision by examining the inputs in an attempt to determine which of them have the greatest impact on the outcome.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;2) Counterfactual faithfulness: The second property of counterfactual faithfulness enables the developer to consider which factors caused a difference in the outcomes. Both these solutions can be deployed without necessarily knowing the contents of black boxes. As per Pasquale, ‘Explainability matters because the process of reason-giving is intrinsic to juridical determinations – not simply one modular characteristic jettisoned as anachronistic once automated prediction is sufficiently advanced.”&lt;a href="#_ftn60" name="_ftnref60"&gt;&lt;sup&gt;&lt;sup&gt;[60]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Rules based system applied contextually&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Oswald et al have suggested two proposals that might  mitigate algorithmic opacity.by designing a broad rules-based system, whose implementation need to be applied in a context-specific manner which thoroughly evaluates the key enablers and challengers in each specific use case.&lt;a href="#_ftn61" name="_ftnref61"&gt;&lt;sup&gt;&lt;sup&gt;[61]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;Experimental proportionality was designed to enable the courts to make proportionality determinations of an algorithm at the experimental stage even before the impacts are fully realised in a manner that would enable them to ensure that appropriate metrics for performance evaluation and cohesive principles of design have been adopted. In such cases they recommend that the courts give the benefit of the doubt to the public sector body subject to another hearing within a stipulated period of time once data on the impacts of the algorithm become more readily available.&lt;/li&gt;
&lt;li&gt;‘ALGO-CARE' calls for the design of a rules-based system which ensures that the algorithms&lt;a href="#_ftn62" name="_ftnref62"&gt;&lt;sup&gt;&lt;sup&gt;[62]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; are:&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;(1) Advisory: Algorithms must retain an advisory capacity that augments existing human capability rather than replacing human discretion outright;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(2) Lawful: Algorithm's proposed function, application, individual effect and use of datasets should be considered in  symbiosis with necessity, proportionality and data minimisation principles;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(3) Granularity: Issues such as data analysis issues such as meaning of data, challenges stemming from disparate tracts of data, omitted data and inferences  should be key points in the implementation process;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(4) Ownership: Due regard should be given to intellectual property ownership but in the case of algorithms used for governance, it may be better to have open source algorithms at the default.  Regardless of the sector,the developer must ensure that the algorithm works in a manner that enables a third party to investigate the workings of the algorithm in an adversarial judicial context.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(5)Challengeable:The results of algorithmic analysis should be applied with regard to professional codes and regulations and be challengeable. In a report evaluating the NITI AAYOG  Discussion Paper, CIS has argued that AI that is used for governance , must be made auditable in the public domain,if not under Free and Open Source Software (FOSS)-particularly in the case of AI that has implications for fundamental rights.&lt;a href="#_ftn63" name="_ftnref63"&gt;&lt;sup&gt;&lt;sup&gt;[63]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(6) Accuracy: The design of the algorithm should check for accuracy;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(7) Responsible: Should consider a wider set of ethical and moral principles and the foundations of human rights as a guarantor of human dignity at all levels and&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(8) Explainable: Machine Learning should be interpretable and accountable.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A rules based system like ALGO-CARE can enable predictability in use frameworks for AI. Predictability compliments and strengthens  transparency.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Accountability&lt;/b&gt;&lt;/h4&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Conduct Impact Assessment&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;There is a need to evolve Algorithmic Impact Assessment frameworks for the different sectors in India, which should address issues of bias, unfairness and other harmful impacts of use of automated decision making. AI is a nascent field and the impact of the technology on the economy, society, etc. is still yet to be fully understood. Impact assessment standards will be important in identifying and addressing potential or existing harms and could potentially be more important in sectors or uses where there is direct human interaction with AI or power dimensions - such as in healthcare or use by the government. A 2018 Report by the AI Now Institute lists methods that should be adopted by the government for conducting his holistic assessment&lt;a href="#_ftn64" name="_ftnref64"&gt;&lt;sup&gt;&lt;sup&gt;[64]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;: These should  include: (1) Self-assessment by the government department in charge of implementing the technology, (2)Development of meaningful inter-disciplinary external researcher review mechanisms, (3) Notice to the public regarding  self-assessment and external review, (4)Soliciting of public comments for clarification or concerns, (5) Special regard to vulnerable communities who may not be able to exercise their voice in public proceedings. An adequate review mechanism which holistically evaluates the impact of AI would ideally include all five of these components in conjunction with each other.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Regulation of Algorithms&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Experts have voiced concerns about AI mimicking human prejudices due to the biases present in the Machine Learning algorithms. Scientists have revealed through their research that machine learning algorithms can imbibe gender and racial prejudices which are ingrained in language patterns or data collection processes. Since AI and machine algorithms are data driven, they arrive at results and solutions based on available &lt;br /&gt; and historical data. When this data itself is biased, the solutions presented by the AI will also be biased. While this is inherently discriminatory, scientists have provided solutions to rectify these biases which can occur at various stages by introducing a counter bias at another stage. It has also been suggested that data samples should be shaped in such a manner so as to minimise the chances of algorithmic bias. Ideally regulation of algorithms could be tailored - explainability, traceability, scrutability. We recommend that the national strategy on AI policy must take these factors into account and combination of a central agency driving the agenda, and sectoral actors framing regulations around specific uses of AI that are problematic and implementation is required.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As the government begins to adopt AI into governance - the extent to which and the  circumstances autonomous decision making capabilities can be delegated to AI need to be questioned. Questions on whether AI should be autonomous, should always have a human in the loop, and should have a ‘kill-switch’ when used in such contexts also need to be answered. A framework or high level principles can help to guide these determinations. For example:&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;Modeling Human Behaviour: An AI solution trying to model human behaviour, as in the case of judicial decision-making or predictive policing may need to be more regulated, adhere to stricter standards, and need more oversight than an algorithm that is trying to predict ‘natural’ phenomenon such as traffic congestion or weather patterns.&lt;/li&gt;
&lt;li&gt;Human Impact: An AI solution which could cause greater harm if applied erroneously - such as a robot soldier that mistakenly targets a civilian - requires a different level and framework of regulation than an AI solution designed to create a learning path for a student in the education sector that errs in making an appropriate assessment.&lt;/li&gt;
&lt;li&gt;Primary User: AI solutions whose primary users are state agents attempting to discharge duties in the public interest such as policemen, should be approached with more caution than those used by individuals such as farmers getting weather alerts&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Fairness&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;It is possible to incorporate broad definitions of fairness into a wide range of data analysis and classification systems.&lt;a href="#_ftn65" name="_ftnref65"&gt;&lt;sup&gt;&lt;sup&gt;[65]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While there can be no bright-line rules that will necessarily enable the operator or designer of a Machine Learning System to arrive at an ex ante determination of fairness, from a public policy perspective, there must be a set of rules or best practices that explain how notions of fairness should be utilised in the real world applications of AI-driven solutions.&lt;a href="#_ftn66" name="_ftnref66"&gt;&lt;sup&gt;&lt;sup&gt;[66]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While broad parameters should be encoded by the developer to ensure compliance with constitutional standards, it is also crucial that the functioning of the algorithm allows for an ex-post determination of fairness by an independent oversight body if the impact of the AI driven solution is challenged.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Further, while there is no precedent on this anywhere in the world, India could consider establishing a Committee entrusted with the specific task of continuously evaluating the operation of AI-driven algorithms. Questions that the government would need to answer with regard to this body include:&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;What should the composition of the body be?&lt;/li&gt;
&lt;li&gt;What should be the procedural mechanisms that govern the operation of the body?&lt;/li&gt;
&lt;li&gt;When should the review committee step in? This is crucial because excessive review may re-entrench the bureaucracy that the AI driven solution was looking to eliminate.&lt;/li&gt;
&lt;li&gt;What information will be necessary for the review committee to carry out its determination? Will there be conflicts with IP, and if so how will these be resolved?&lt;/li&gt;
&lt;li&gt;To what degree will the findings of the committee be made public?&lt;/li&gt;
&lt;li&gt;What powers will the committee have? Beyond making determinations, how will these be enforced?&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Market incentives&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Standards as a means to address data issues&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;With digitisation of legacy records and the ability to capture more granular data digitally, one of the biggest challenges facing Big Data is a lack of standardised data and interoperability frameworks. This is particularly true in the healthcare and medicine sector where medical records do not follow a clear standard, which poses a challenge to their datafication and analysis. The presence of developed standards in data management and exchange,  interoperable Distributed Application Platform and Services, Semantic related standards for markup, structure, query, semantics, Information access and exchange have been spoken of as essential to address the issues of lack of standards in Big Data.&lt;a href="#_ftn67" name="_ftnref67"&gt;&lt;sup&gt;&lt;sup&gt;[67]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Towards enabling usability of data, it is important that clear data standards are established. This has been recognized by Niti Aayog in its National Strategy for AI. On one hand, there can be operational issues with allowing each organisation to choose their own specific standards to operate under, while on the other hand, non-uniform digitisation of data will also cause several practical problems, most primarily to do with interoperability of the individual services, as well as their usability. For instance, in the healthcare sector, though India has adopted an EHR policy, implementation of this policy is not yet harmonized - leading to different interpretations of ‘digitizing records’ (i.e. taking snapshots of doctor notes), retention methods and periods, and comprehensive implementation across all hospital data. Similarly, while independent banks and other financial organisations are already following, or in the process of developing internal practices, there exist no uniform standards for digitisation of financial data. As AI development, and application becomes more mainstream in the financial sector, the lack of a fixed standard could create significant problems.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Better Design Principles in Data Collection&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;An enduring criticism of the existing notice and consent framework has been that long, verbose and unintelligible privacy notices are not efficient in informing individuals and helping them make rational choices. While this problem predates Big Data, it has only become more pronounced in recent times, given the ubiquity of data collection and implicit ways in which data is being collected and harvested. Further, constrained interfaces on mobile devices, wearables, and smart home devices connected in an Internet of Things amplify the usability issues of the privacy notices. Some of the issues with privacy notices include notice complexity, lack of real choices, notices decoupled from the system collecting data etc. An industry standard for a design approach to privacy notices which includes looking at factors such as the timing of the notice, the channels used for communicating the notices, the modality (written, audio, machine readable, visual) of the notice and whether the notice only provides information or also includes choices within its framework, would be of great help.  Further, use of privacy by design principles can be done not just at the level of privacy notices but at each step of the information flow, and the architecture of the system can be geared towards more privacy enhanced choices.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1" name="_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2" name="_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf"&gt;https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3" name="_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.nitrd.gov/PUBS/national_ai_rd_strategic_plan.pdf"&gt;https://www.nitrd.gov/PUBS/national_ai_rd_strategic_plan.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4" name="_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.gov.uk/government/publications/artificial-intelligence-sector-deal/ai-sector-deal&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5" name="_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.nedo.go.jp/content/100865202.pdf"&gt;http://www.nedo.go.jp/content/100865202.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6" name="_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.eu-robotics.net/sparc/10-success-stories/european-robotics-creating-new-markets.html?changelang=2&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref7" name="_ftn7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy"&gt;https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref8" name="_ftn8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.newamerica.org/cybersecurity-initiative/blog/chinas-plan-lead-ai-purpose-prospects-and-problems/"&gt;https://www.newamerica.org/cybersecurity-initiative/blog/chinas-plan-lead-ai-purpose-prospects-and-problems/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref9" name="_ftn9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.uaeai.ae/en/"&gt;http://www.uaeai.ae/en/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref10" name="_ftn10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.aisingapore.org/"&gt;https://www.aisingapore.org/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref11" name="_ftn11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://news.joins.com/article/22625271"&gt;https://news.joins.com/article/22625271&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref12" name="_ftn12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.aiforhumanity.fr/pdfs/MissionVillani_Report_ENG-VF.pdf"&gt;https://www.aiforhumanity.fr/pdfs/MissionVillani_Report_ENG-VF.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref13" name="_ftn13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe"&gt;https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe&lt;/a&gt; &lt;a href="https://www.euractiv.com/section/digital/news/twenty-four-eu-countries-sign-artificial-intelligence-pact-in-bid-to-compete-with-us-china/"&gt;https://www.euractiv.com/section/digital/news/twenty-four-eu-countries-sign-artificial-intelligence-pact-in-bid-to-compete-with-us-china/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref14" name="_ftn14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.aitf.org.in/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref15" name="_ftn15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://www.niti.gov.in/writereaddata/files/document_publication/NationalStrategy-for-AI-Discussion-Paper.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref16" name="_ftn16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref17" name="_ftn17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref18" name="_ftn18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/the-ai-task-force-report-the-first-steps-towards-indias-ai-framework&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref19" name="_ftn19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref20" name="_ftn20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe"&gt;https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref21" name="_ftn21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://pib.nic.in/newsite/PrintRelease.aspx?relid=181007&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref22" name="_ftn22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Ryan Calo, 2017 Artificial Intelligence Policy: A Primer and Roadmap. U.C. Davis L. Review,&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Vol. 51, pp. 398 - 435.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt; &lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref23" name="_ftn23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://trai.gov.in/sites/default/files/CIS_07_11_2017.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref24" name="_ftn24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref25" name="_ftn25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://www.niti.gov.in/writereaddata/files/document_publication/NationalStrategy-for-AI-Discussion-Paper.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref26" name="_ftn26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://martechtoday.com/bottos-launches-a-marketplace-for-data-to-train-ai-models-214265&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref27" name="_ftn27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://opensource.com/article/18/5/top-8-open-source-ai-technologies-machine-learning&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref28" name="_ftn28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Amanda Levendowski, How Copyright Law Can Fix Artificial Intelligence’s&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Implicit Bias Problem, 93 WASH. L. REV. (forthcoming 2018) (manuscript at 23, 27-32),&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3024938"&gt;https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3024938&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref29" name="_ftn29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Id&lt;/i&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref30" name="_ftn30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; H. Brendan McMahan, et al., Communication-Efficient Learning of Deep Networks&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;from Decentralized Data, arXiv:1602.05629 (Feb. 17, 2016), &lt;a href="https://arxiv.org/abs/1602.05629"&gt;https://arxiv.org/abs/1602.05629&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref31" name="_ftn31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Id&lt;/i&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref32" name="_ftn32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Pierre N. Leval, Nimmer Lecture: Fair Use Rescued, 44 UCLA L. REV. 1449, 1457 (1997).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref33" name="_ftn33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref34" name="_ftn34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref35" name="_ftn35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Discussion Paper on National Strategy for Artificial Intelligence | NITI Aayog | National Institution for Transforming India. (n.d.) p. 54. Retrieved from http://niti.gov.in/content/national-strategy-ai-discussion-paper.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref36" name="_ftn36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Leverhulme Centre for the Future of Intelligence, http://lcfi.ac.uk/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref37" name="_ftn37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; AI Now, https://ainowinstitute.org/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref38" name="_ftn38"&gt;&lt;sup&gt;&lt;sup&gt;[38]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref39" name="_ftn39"&gt;&lt;sup&gt;&lt;sup&gt;[39]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://iridescentlearning.org/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref40" name="_ftn40"&gt;&lt;sup&gt;&lt;sup&gt;[40]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref41" name="_ftn41"&gt;&lt;sup&gt;&lt;sup&gt;[41]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Points, L., &amp;amp; Potton, E. (2017). Artificial intelligence and automation in the UK.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref42" name="_ftn42"&gt;&lt;sup&gt;&lt;sup&gt;[42]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Paul, Y., Hickok, E., Sinha, A. and Tiwari, U., Artificial Intelligence in the Healthcare Industry in India, Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/files/ai-and-healtchare-report"&gt;https://cis-india.org/internet-governance/files/ai-and-healtchare-report&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref43" name="_ftn43"&gt;&lt;sup&gt;&lt;sup&gt;[43]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Goudarzi, S., Hickok, E., and Sinha, A., AI in the Banking and Finance Industry in India,  Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/blog/ai-in-banking-and-finance"&gt;https://cis-india.org/internet-governance/blog/ai-in-banking-and-finance&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref44" name="_ftn44"&gt;&lt;sup&gt;&lt;sup&gt;[44]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Paul, Y., Hickok, E., Sinha, A. and Tiwari, U., Artificial Intelligence in the Healthcare Industry in India, Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/files/ai-and-healtchare-report"&gt;https://cis-india.org/internet-governance/files/ai-and-healtchare-report&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref45" name="_ftn45"&gt;&lt;sup&gt;&lt;sup&gt;[45]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://news.microsoft.com/en-in/government-karnataka-inks-mou-microsoft-use-ai-digital-agriculture/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref46" name="_ftn46"&gt;&lt;sup&gt;&lt;sup&gt;[46]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://news.microsoft.com/en-in/government-telangana-adopts-microsoft-cloud-becomes-first-state-use-artificial-intelligence-eye-care-screening-children/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref47" name="_ftn47"&gt;&lt;sup&gt;&lt;sup&gt;[47]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; NITI Aayog. (2018). Discussion Paper on National Strategy for Artificial Intelligence. Retrieved from http://niti.gov.in/content/national-strategy-ai-discussion-paper. 18&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref48" name="_ftn48"&gt;&lt;sup&gt;&lt;sup&gt;[48]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://edps.europa.eu/sites/edp/files/publication/16-10-19_marrakesh_ai_paper_en.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref49" name="_ftn49"&gt;&lt;sup&gt;&lt;sup&gt;[49]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref50" name="_ftn50"&gt;&lt;sup&gt;&lt;sup&gt;[50]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; J. Schradie, The Digital Production Gap: The Digital Divide and Web 2.0 Collide. Elsevier Poetics, 39 (1).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref51" name="_ftn51"&gt;&lt;sup&gt;&lt;sup&gt;[51]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; D Lazer, et al., The Parable of Google Flu: Traps in Big Data Analysis. Science. 343 (1).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref52" name="_ftn52"&gt;&lt;sup&gt;&lt;sup&gt;[52]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Danah Boyd and Kate Crawford,  Critical Questions for Big Data. Information, Communication &amp;amp; Society. 15 (5).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref53" name="_ftn53"&gt;&lt;sup&gt;&lt;sup&gt;[53]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; John Podesta, (2014) Big Data: Seizing Opportunities, Preserving Values, available at&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="http://www.whitehouse.gov/sites/default/files/docs/big_data_privacy_report_may_1_2014.pdf"&gt;http://www.whitehouse.gov/sites/default/files/docs/big_data_privacy_report_may_1_2014.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref54" name="_ftn54"&gt;&lt;sup&gt;&lt;sup&gt;[54]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; E. Ramirez, (2014) FTC to Examine Effects of Big Data on Low Income and Underserved Consumers at September Workshop, available at &lt;a href="http://www.ftc.gov/news-events/press-releases/2014/04/ftc-examine-effects-big-data-lowincome-underserved-consumers"&gt;http://www.ftc.gov/news-events/press-releases/2014/04/ftc-examine-effects-big-data-lowincome-underserved-consumers&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref55" name="_ftn55"&gt;&lt;sup&gt;&lt;sup&gt;[55]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; M. Schrage, Big Data’s Dangerous New Era of Discrimination, available at &lt;a href="http://blogs.hbr.org/2014/01/bigdatas-dangerous-new-era-of-discrimination/"&gt;http://blogs.hbr.org/2014/01/bigdatas-dangerous-new-era-of-discrimination/&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref56" name="_ftn56"&gt;&lt;sup&gt;&lt;sup&gt;[56]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Google/DoubleClick Merger case&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref57" name="_ftn57"&gt;&lt;sup&gt;&lt;sup&gt;[57]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; French Competition Authority, Opinion n°10-A-13 of 14.06.2010,&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;http://www.autoritedelaconcurrence.fr/pdf/avis/10a13.pdf. That opinion of the Authority aimed at&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;giving general guidance on that subject. It did not focus on any particular market or industry&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;although it described a possible application of its analysis to the telecom industry.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref58" name="_ftn58"&gt;&lt;sup&gt;&lt;sup&gt;[58]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.analysisgroup.com/is-big-data-a-true-source-of-market-power/#sthash.5ZHmrD1m.dpuf"&gt;http://www.analysisgroup.com/is-big-data-a-true-source-of-market-power/#sthash.5ZHmrD1m.dpuf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref59" name="_ftn59"&gt;&lt;sup&gt;&lt;sup&gt;[59]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Doshi-Velez, F., Kortz, M., Budish, R., Bavitz, C., Gershman, S., O'Brien, D., ... &amp;amp; Wood, A. (2017). Accountability of AI under the law: The role of explanation. arXiv preprint arXiv:1711.01134.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref60" name="_ftn60"&gt;&lt;sup&gt;&lt;sup&gt;[60]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Frank A. Pasquale ‘Toward a Fourth Law of Robotics: Preserving Attribution, Responsibility, and Explainability in an Algorithmic Society’ (July 14, 2017). Ohio State Law Journal, Vol. 78, 2017; U of Maryland Legal Studies Research Paper No. 2017-21, 7.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref61" name="_ftn61"&gt;&lt;sup&gt;&lt;sup&gt;[61]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Oswald, M., Grace, J., Urwin, S., &amp;amp; Barnes, G. C. (2018). Algorithmic risk assessment policing models: lessons from the Durham HART model and ‘Experimental’ proportionality. Information &amp;amp; Communications Technology Law, 27(2), 223-250.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref62" name="_ftn62"&gt;&lt;sup&gt;&lt;sup&gt;[62]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Ibid.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref63" name="_ftn63"&gt;&lt;sup&gt;&lt;sup&gt;[63]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Abraham S., Hickok E., Sinha A., Barooah S., Mohandas S., Bidare P. M., Dasgupta S., Ramachandran V., and Kumar S., NITI Aayog Discussion Paper: An aspirational step towards India’s AI policy. Retrieved from https://cis-india.org/internet-governance/files/niti-aayog-discussion-paper.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref64" name="_ftn64"&gt;&lt;sup&gt;&lt;sup&gt;[64]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Reisman D., Schultz J., Crawford K., Whittaker M., (2018, April) Algorithmic Impact Assessments: A Practical Framework For Public Agency Accountability. Retrieved from https://ainowinstitute.org/aiareport2018.pdf.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref65" name="_ftn65"&gt;&lt;sup&gt;&lt;sup&gt;[65]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Sample I., (2017, November 5) Computer says no: why making AIs fair, accountable and transparent is crucial. Retrieved from &lt;a href="https://www.theguardian.com/science/2017/nov/05/computer-says-no-why-making-ais-fair-accountable-and-transparent-is-crucial"&gt;https://www.theguardian.com/science/2017/nov/05/computer-says-no-why-making-ais-fair-accountable-and-transparent-is-crucial&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref66" name="_ftn66"&gt;&lt;sup&gt;&lt;sup&gt;[66]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Kroll, J. A., Barocas, S., Felten, E. W., Reidenberg, J. R., Robinson, D. G., &amp;amp; Yu, H. (2016). Accountable algorithms. U. Pa. L. Rev., 165, 633.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref67" name="_ftn67"&gt;&lt;sup&gt;&lt;sup&gt;[67]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.iso.org/iso/big_data_report-jtc1.pdf"&gt;http://www.iso.org/iso/big_data_report-jtc1.pdf&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda'&gt;https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Amber Sinha, Elonnai Hickok and Arindrajit Basu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-05T15:39:59Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda">
    <title>AI in India a Policy Agenda</title>
    <link>https://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda'&gt;https://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>elonnai</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-09-05T15:26:08Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state">
    <title>India’s Biometric Database Is Creating A Perfect Surveillance State — And U.S. Tech Companies Are On Board</title>
    <link>https://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state</link>
    <description>
        &lt;b&gt;The Aadhaar program offers a glimpse of the tech world's latest quest to control our lives, where dystopias are created in the name of helping the impoverished.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Paul Bluementhol and Gopal Sathe was published in &lt;a class="external-link" href="https://www.huffingtonpost.in/entry/india-aadhuar-tech-companies_us_5b7ebc53e4b0729515109fd0"&gt;Huffington Post&lt;/a&gt; on August 25, 2018. Sunil Abraham was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Big U.S. technology  companies are involved in the construction of one of the most intrusive  citizen surveillance programs in history.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For the past nine years, India has  been building the world’s biggest biometric database by collecting the  fingerprints, iris scans and photos of nearly 1.3 billion people. For  U.S. tech companies like Microsoft, Amazon and Facebook, the project,  called Aadhaar (which means “proof” or “basis” in Hindi), could be a  gold mine.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The CEO of Microsoft has repeatedly praised the project, and local media have carried frequent reports on &lt;a href="https://m.economictimes.com/tech/hardware/uidai-wants-to-make-mobile-phones-aadhaar-enabled-holds-discussion-with-smartphone-makers/amp_articleshow/53441186.cms?__twitter_impression=true" rel="noopener noreferrer" target="_blank"&gt;consultations between the Indian government and senior executives&lt;/a&gt; from companies like Apple and Google (in addition to South Korean-based  Samsung) on how to make tech products Aadhaar-enabled. But when  reporters of HuffPost and HuffPost India asked these companies in the  past weeks to confirm they were integrating Aadhaar into their products,  only one company ― Google ― gave a definitive response.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;That’s because Aadhaar has become  deeply controversial, and the subject of a major Supreme Court of India  case that will decide the future of the program as early as this month.  Launched nine years ago as a simple and revolutionary way to streamline  access to welfare programs for India’s poor, the database has become  Indians’ gateway to nearly any type of service ― from food stamps to a  passport or a cell phone connection. Practical errors in the system have caused &lt;a href="https://stateofaadhaar.in/report_pages/state-of-aadhaar-report-2017-18/" rel="noopener noreferrer" target="_blank"&gt;millions&lt;/a&gt; of poor Indians to lose out on aid. And the exponential growth of the  project has sparked concerns among security researchers and academics  that India is the first step toward setting up a surveillance society to  rival China.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;A Scheme Born In The U.S.&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Tapping into Aadhaar would help big  tech companies access the data and transactions of millions of users in  the second most populous country on earth, explained &lt;a href="https://www.huffingtonpost.in/2018/06/06/after-beta-testing-on-a-billion-indians-the-tech-behind-aadhaar-is-going-global_a_23452248/" rel="noopener noreferrer" target="_blank"&gt;Usha Ramanathan&lt;/a&gt;, a Delhi-based lawyer, legal researcher and one of Aadhaar’s most vocal critics.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The idea for India’s national  biometric identification team wasn’t unprecedented, and in fact, it has  strong parallels with a system proposed for the United States. Following  the Sept. 11, 2001, attacks, the CEO of Oracle, Larry Ellison, offered  to build the&lt;a href="https://www.computerworld.com/article/2583197/data-privacy/ellison-offers-free-software-for-national-id.html" rel="noopener noreferrer" target="_blank"&gt; U.S. government software&lt;/a&gt; for a national identification system that would include a centralized  computer database of all U.S. citizens. The program never got off the  ground amid objections from privacy and civil liberties advocates, but  India’s own Ellison figure, Nandan Nilekani, had a similar idea. The  billionaire founder of IT consulting giant Infosys, Nilekani  conceptualized Aadhaar as a way to eliminate waste and corruption in  India’s social welfare programs. He lobbied the government to bring in  Aadhaar, and went on to run the project under the administration of  Manmohan Singh. Nilekani gained even more influence under current Prime  Minister Narendra Modi, who moved to make Aadhaar necessary for almost  any kind of business in India.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The first 12-digit Aadhaar ID was  issued in 2010. Today, over a billion people (around 89 percent of  India’s population) have been included in the system ― from India’s  unimaginably wealthy billionaires to the homeless, from residents of the  country’s sprawling cities to remote inaccessible villages. While  initially a voluntary program, the database is now linked to just about  all government programs. You need an Aadhaar ID to get a &lt;a href="https://www.businesstoday.in/current/economy-politics/uidai-aadhaar-tatkal-passports-deadline-extension-order/story/272576.html" rel="noopener noreferrer" target="_blank"&gt;passport issued or renewed&lt;/a&gt;. Aadhaar was made mandatory for operating a bank account, using a cell phone or investing in mutual funds, only for the proposals to be rolled back pending the Supreme Court verdict on the constitutionality of the project.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As Aadhaar identification became  integrated into other systems like banking, cell phones and government  programs, tech companies can use the program to cross-reference their  datasets against other&lt;a href="https://www.hindustantimes.com/india-news/why-state-data-hubs-pose-a-risk-to-aadhaar-security/story-Klyl3yT5MkFk6Szg2yGg9N.html" rel="noopener noreferrer" target="_blank"&gt; databases&lt;/a&gt; and assemble a far more detailed and intrusive picture of Indians’  lives. That would allow them, for example, to better target products or  advertising to the vast Indian population. “You can take a unique  identifying number and use it to find data in different sectors,”  explained &lt;a href="https://www.huffingtonpost.in/2018/04/25/aadhaar-seeding-fiasco-how-to-geo-locate-every-minority-family-in-ap-with-one-click_a_23419643/" rel="noopener noreferrer" target="_blank"&gt;Pam Dixon&lt;/a&gt;,  executive director of the World Privacy Forum, an American public  interest research group. “That number can be cross-walked across all the  different parts of their life.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Microsoft, which uses  Aadhaar in a new version of Skype to verify users, declined to talk  about its work integrating products with the Aadhaar database. But Bill  Gates, Microsoft’s founder, &lt;a href="https://timesofindia.indiatimes.com/business/india-business/aadhaar-doesnt-pose-any-privacy-issue-gates/articleshow/64012833.cms" rel="noopener noreferrer" target="_blank"&gt;has publicly endorsed Aadhaar&lt;/a&gt; and his foundation is funding a World Bank program to bring Aadhaar-like  ID programs to other countries. Gates has also argued that ID  verification schemes like Aadhaar in itself don’t pose privacy issues.  Microsoft CEO Satya Nadella has repeatedly praised Aadhaar in both his  recent book and a &lt;a href="https://gadgets.ndtv.com/internet/features/satya-nadella-and-nandan-nilekani-talk-aadhaar-india-stack-ai-and-ar-1661798" rel="noopener noreferrer" target="_blank"&gt;tour across India&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amazon did not respond to a request for comment, but according to a &lt;a href="https://www.buzzfeednews.com/article/pranavdixit/amazon-is-asking-indians-to-hand-over-their-aadhaar-indias" rel="noopener noreferrer" target="_blank"&gt;BuzzFeed report&lt;/a&gt;, the company told Indian customers not  uploading a copy of Aadhaar “might result in a delay in the resolution  or no resolution” of cases where packages were missing.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Facebook, too, failed to respond to  repeated requests for comment, though the platform’s prompts for users  to log in with the same name as their Aadhaar card prompted suspicions from &lt;a href="https://gadgets.ndtv.com/social-networking/news/facebook-aadhaar-real-name-new-user-sign-up-onboarding-process-test-1792648" rel="noopener noreferrer" target="_blank"&gt;users&lt;/a&gt; that  it wanted everyone to use their Aadhaar-verified names and spellings so  they could later build in Aadhaar functionality with minimal problems.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A spokesman for Google, which has its  own payments platform in India called Tez, told HuffPost that the  company has not integrated any of its products with Aadhaar. But there was outrage earlier in August when the Aadhaar helpline was added &lt;a href="https://www.indiatoday.in/technology/news/story/aadhaar-number-in-phones-uidai-google-clarification-1306344-2018-08-06" rel="noopener noreferrer" target="_blank"&gt;to Android phones without informing users&lt;/a&gt;. Google claimed in a statement to the &lt;a href="https://economictimes.indiatimes.com/news/politics-and-nation/uidai-row-google-says-it-inadvertently-coded-the-number/articleshow/65264353.cms" rel="noopener noreferrer" target="_blank"&gt;Economic&lt;i&gt; Times&lt;/i&gt;&lt;/a&gt; this happened “inadvertently”&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Privacy Jeopardized For Millions&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;But the same features that are set to  make tech companies millions are are also the ones that threaten the  privacy and security of millions of Indians.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“As long as [the data] is being  shared with so many people and services and companies, without knowing  who has what data, it will always be an issue,” said Srinivas Kodali, an  independent security researcher. “They can’t protect it until they  encrypt it and stop sharing data.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One government website allowed users to search and geolocate homes on the basis of &lt;a href="https://www.huffingtonpost.in/2018/04/25/aadhaar-seeding-fiasco-how-to-geo-locate-every-minority-family-in-ap-with-one-click_a_23419643/" rel="noopener noreferrer" target="_blank"&gt;caste and religion&lt;/a&gt; ― sparking fears of ethnic and religious violence in a country where  lynchings, beatings and mob violence are commonplace. Another website  broadcast the names, phone numbers and medical purchases — like generic  Viagra and HIV medication — of &lt;a href="https://www.huffingtonpost.in/2018/06/17/andhra-pradesh-tracked-you-as-you-bought-viagra-then-put-your-name-and-phone-number-on-the-internet-for-the-world-to-see_a_23459943/" rel="noopener noreferrer" target="_blank"&gt;anyone who buys medicines&lt;/a&gt; from government stores. &lt;a href="https://www.huffingtonpost.in/2018/07/11/indias-latest-data-leak-is-so-basic-that-peoples-aadhaar-number-bank-account-and-fathers-name-are-just-one-google-search-away_a_23479694/" rel="noopener noreferrer" target="_blank"&gt;In another leak&lt;/a&gt;, a Google search for phone numbers of farmers in Andhra Pradesh would reveal their Aadhaar numbers, address, fathers’ names and bank account numbers.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The leaks are aggravated by “a Star  Trek-type obsession” with data dashboards, said Sunil Abraham, executive  director of the Center for Internet and Society. Many government  departments each created an online data dashboard with detailed personal  records on individuals, he explained. The massive centralization of  personal data, he said, &lt;a href="https://www.huffingtonpost.in/2018/07/23/how-andhra-pradesh-built-indias-first-police-state-using-aadhaar-and-a-census_a_23487838/" rel="noopener noreferrer" target="_blank"&gt;created a huge security risk&lt;/a&gt; as these dashboards were accessible to any government official and in many cases, were even left open to the public.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Authentication failures have led to deaths among the poorest sections of Indian society &lt;a href="https://timesofindia.indiatimes.com/city/ranchi/7-hunger-deaths-related-to-aadhaar/articleshow/64695700.cms" rel="noopener noreferrer" target="_blank"&gt;when people were denied government food rations&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;And much like the tech companies,  some local governments are using the system to connect data sets and  build expansive surveillance. In the state of Andhra Pradesh in India,  there’s a &lt;a href="https://www.huffingtonpost.in/2018/07/23/how-andhra-pradesh-built-indias-first-police-state-using-aadhaar-and-a-census_a_23487838/" rel="noopener noreferrer" target="_blank"&gt;war room next to the state chief minister’s office&lt;/a&gt;,  where a wall of screens shows details from databases that collect  information from every department. There are security cameras and  dashboards that track every mention of the chief minister on the news.  There’s a separate team watching what’s being said about him on social  media and there are also dashboards that collect information from IoT  [Internet of Things] sensors across the state.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Court Ruling Could Halt Rollout&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Those issues around privacy are why  the dreams of government bureaucrats and large tech companies to build a  perfect surveillance apparatus around Aadhaar may ultimately fall  apart. The Supreme Court of India is set to decide on a case that could  decide the future of the program.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The court is set to review 27 petitions, including whether requiring  an Aadhaar for government subsidies and benefits makes access to these  programs conditional, even though the state is constitutionally bound to  deliver them. The petitioners include lawyers, academics and a  92-year-old retired judge whose petition also secured the right to  privacy as a fundamental right in August 2017. Petitioners also argue  that the ability for Aadhaar to be used to track and profile people is  unconstitutional.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In its judgment, due any day now, the court will rule on all 27  petitions together. It will decide not only the fate of the Aadhaar Act  of 2016, but likely the future involvement of some of tech’s biggest  companies in one of the world’s most ambitious and divisive IT projects.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state'&gt;https://cis-india.org/internet-governance/news/huffington-post-august-25-2018-paul-bluementhal-and-gopal-sathe-indias-biometric-database-is-creating-a-perfect-surveillance-state&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2018-09-04T14:40:51Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018">
    <title>World Library and Information Congress 2018</title>
    <link>https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018</link>
    <description>
        &lt;b&gt;Swaraj Paul Barooah was a speaker at two panels during the World Library and Information Congress 2018 (WLIC2018), organised by the International Federation of Library Associations and Institutions (IFLA) in Kuala Lumpur on August 26 and 27, 2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Swaraj's first panel, titled "Intellectual Freedom in a Polarised World" was selected as one of 9 sessions to be live-streamed and recorded, out of 249 sessions in total. The recording can be accessed on &lt;a class="external-link" href="https://www.youtube.com/watch?v=0HujFHQn1zY"&gt;YouTube&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Session 123 Intellectual Freedom in a Polarised             World - Freedom of Access to Information and Freedom of             Expression (FAIFE) Advisory Committee (SI)&lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Chair: Martyn Wade, United Kingdom&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In many national contexts, citizens are             seen to be either “with the government or against it,”             leaving little opportunity to freely and safely express more             nuanced views of current social, political or economic             issues. While notable authoritarian regimes quite             transparently monitor and limit societal discussion, others,             ostensibly democratic, may work in practice to blunt             potentially unfavourable social commentary on the pretence             of defending political stability or public morality. IFLA’s             Freedom of Access to Information and Freedom of Expression             (FAIFE) Advisory Committee explores this phenomenon--and the             potential role of civil society and information             professionals in advancing freedom of expression--through             the experience and insights of an NGO leader, an academic             public intellectual, and an officer of UNESCO.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Presentations&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Internet and the freedom of expression in Indonesia: opportunity and challenges - Indriaswati Dyah Saptaningrum, University of New South Wales; former Executive Director of the ELSAM human rights organization (Indonesia), Australia&lt;/li&gt;
&lt;li&gt;Freedom of Expression in Malaysia - Azmi Bin Sharom, Faculty of Law, University of Malaysia, Malaysia&lt;/li&gt;
&lt;li&gt;What's up with WhatsApp - polarisation and lynchings in India - Swaraj Paul Barooah, The Centre for Internet and Society, India&lt;/li&gt;
&lt;li&gt;How to align national laws with international standards on freedom of expression? - Ming-Kuok Lim, Programme Specialist for Communication and Information, UNESCO, Indonesia&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;&lt;br /&gt;&lt;b&gt;Session 140 To Have and not to Hold: The End of Ownership - CLM and FAIFE&lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The shift from buying physical library media to licensing digital content has profound impacts on the way libraries acquire and give access to content. From e-books that can disappear at the whim (or the mistake) of the owners of a server far away, to the limits on sharing and archiving imposed by some contracts. From the potential monitoring of reader behaviour, to the criminalisation of those who simply want to improve user experience. The dominance of digital media in information provision has both broadened the field of information to which we have access, but potentially made it shallower in terms of the use that libraries, and their users, can make of it. The joint CLM-FAIFE session will look at the question of the end of ownership from a legal and an ethical point of view, drawing on the experience and knowledge of the two communities.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Tomas A. Lipinski, School of Information Studies, University of Wisconsin, Milwaukee, USA – The Limits of Licensing.&lt;/li&gt;
&lt;li&gt;Ann Okerson, Centre for Research Libraries, Chicago, USA – The Possibilities of Licensing.&lt;/li&gt;
&lt;li&gt;Swaraj Paul Barooah, Centre for Internet and Society – The Balance among Licenses and Exceptions and Limitations to Copyright.&lt;/li&gt;
&lt;li&gt;Brent Roe - Laurentian University, Sudbury, Canada – Privacy Concerns and Other Side Effects of Licensing.&lt;/li&gt;
&lt;li&gt;Jonathan Hernandez-Perez, Researcher, Instituto de Investigaciones Bibilotecologicas, UNAM, Mexico City, Mexico (Invited) – Special Issues in the Developing World; Open Access as a Recapturing of Ownership.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt; &lt;/p&gt;
&lt;p&gt; &lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018'&gt;https://cis-india.org/internet-governance/news/world-library-and-information-congress-2018&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Freedom of Speech and Expression</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2018-08-31T02:23:29Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/files/analysis-of-cloud-act-and-implications-for-india">
    <title>Analysis of CLOUD Act and Implications for India</title>
    <link>https://cis-india.org/internet-governance/files/analysis-of-cloud-act-and-implications-for-india</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/files/analysis-of-cloud-act-and-implications-for-india'&gt;https://cis-india.org/internet-governance/files/analysis-of-cloud-act-and-implications-for-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>elonnai</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-08-22T14:53:50Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/files/indias-contribution-to-internet-governance-debates">
    <title>India's Contribution to Internet Governance Debates</title>
    <link>https://cis-india.org/internet-governance/files/indias-contribution-to-internet-governance-debates</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/files/indias-contribution-to-internet-governance-debates'&gt;https://cis-india.org/internet-governance/files/indias-contribution-to-internet-governance-debates&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>sunil</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-08-16T13:32:54Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects">
    <title>Workshop of Publishers and Writers on Unicode, Open Source and Wikimedia Projects</title>
    <link>https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects</link>
    <description>
        &lt;b&gt;CIS-A2K team organized a workshop on unicode, open source and wikimedia projects at Pune on July 25, 2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;CIS-A2K has started dialogue with the publishers for the last 6 months  regarding FOSS, Open knowledge and content donation to Wikimedia  Projects. As a result, various publishers and writers associated with  social sector have taken initiative to organise workshops. Parisar, NGO  working on environmental issues organised this orientation session for 4  activists, 5 publishers and 6 writers.&lt;/p&gt;
&lt;p&gt;The following issues were discussed with demonstrations -&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Adoption of Unicode and open source software&lt;/li&gt;
&lt;li&gt;Make available old reference books, out of print books on web using new technology&lt;/li&gt;
&lt;li&gt;Strengthening of Marathi Wikipedia, Wikisource, Wiktionary etc.&lt;/li&gt;
&lt;li&gt;Social marketing of literary books&lt;/li&gt;
&lt;li&gt;Explore the new medium of e books&lt;/li&gt;
&lt;li&gt;To establish a company on lines of Amazon for e-selling of marathi books&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;Read the event details on &lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Workshop_of_Publishers_and_Writers_on_Unicode,_Open_Source_and_Wikimedia_Projects"&gt;Wikimedia Blog&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects'&gt;https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Wikimedia</dc:subject>
    
    
        <dc:subject>CIS-A2K</dc:subject>
    
    
        <dc:subject>Wikipedia</dc:subject>
    
    
        <dc:subject>Access to Knowledge</dc:subject>
    

   <dc:date>2018-08-11T02:08:53Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf">
    <title>AI and Governance Case Study pdf</title>
    <link>https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf'&gt;https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>pranav</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-08-01T02:06:47Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/files/normative-regulation-of-cyber-space-report">
    <title>Normative Regulation of Cyber Space Report</title>
    <link>https://cis-india.org/internet-governance/files/normative-regulation-of-cyber-space-report</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/files/normative-regulation-of-cyber-space-report'&gt;https://cis-india.org/internet-governance/files/normative-regulation-of-cyber-space-report&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>pranav</dc:creator>
    <dc:rights></dc:rights>


   <dc:date>2018-07-31T23:42:42Z</dc:date>
   <dc:type>File</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/about/newsletters/july-2018-newsletter">
    <title>July 2018 Newsletter</title>
    <link>https://cis-india.org/about/newsletters/july-2018-newsletter</link>
    <description>
        &lt;b&gt;CIS July 2018 newsletter.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;&lt;span&gt;Dear readers,&lt;/span&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Previous issues of the newsletters can be &lt;a class="external-link" href="http://cis-india.org/about/newsletters"&gt;accessed here&lt;/a&gt;.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Highlights&lt;/h2&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;Paul Kurien and Akriti Bopanna carried out an &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/icann-diversity-analysis"&gt;analysis of the diversity of participation&lt;/a&gt; at the ICANN processes by taking a close look at their mailing lists. &lt;/li&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://meta.wikimedia.org/wiki/CIS-A2K/Events/2018#July"&gt;CIS-A2K organized 6 events&lt;/a&gt;: partnership discussions with Misimi Telugu monthly magazine; partnership activity in Annamayya Library, Guntur, a workshop in Tumakur University; a workshop of river activists for building Jal Bodh; a workshop of publishers and writers on unicode, open source and wikimedia projects; and a Telugu literary conference.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;CIS had worked with the Research and Advisory Group (RAG) of the Global Commission on the Stability of Cyberspace (GCSC). The work looked at the negotiation processes and strategies that various players may adopt as they drive the cyber norms agenda. In continuation &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-potential-for-the-normative-regulation-of-cyberspace-implications-for-india"&gt;CIS has brought out a report&lt;/a&gt; which focuses more extensively on the substantive law and principles at play and looks closely at what the global state of the debate means for India.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;The debate surrounding privacy has in recent times gained momentum due to the Aadhaar judgement and the growing concerns around the use of personal data by corporations and governments. In this light &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018"&gt;CIS has made comments and recommendations to the India Privacy Code, 2018&lt;/a&gt;. &lt;/li&gt;
&lt;li style="text-align: justify; "&gt;CIS &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/cis-submitted-a-response-to-a-notice-of-enquiry-by-the-us-government-on-international-internet-policy-priorities"&gt;drafted a response&lt;/a&gt; to a Notice of Inquiry (NOI) issued by the U.S. Commerce Department's National Telecommunications and Information Administration (NTIA) on "International Internet Policy Priorities." CIS commented on the free flow of information and jurisdiction, mult-stakeholder approach to internet governance, privacy and security.&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;Elonnai Hickok, Shweta Mohandas and Swaraj Paul Barooah &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-ai-task-force-report-the-first-steps-towards-indias-ai-framework"&gt;compiled the AI Task Force Report&lt;/a&gt;, India's first step towards an AI framework. The Task Force on Artificial Intelligence was established by the Ministry of Commerce and Industry to leverage AI for economic benefits, and provide policy recommendations on the deployment of AI for India. &lt;/li&gt;
&lt;li style="text-align: justify; "&gt;Paul Kurian and Akriti Bopanna &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/icann-diversity-analysis"&gt;carried out an analysis&lt;/a&gt; of the diversity of participation at the ICANN processes by taking a close look at their mailing lists. &lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;Articles&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/raw/indian-express-july-1-2018-nishant-shah-digital-native-bigger-picture"&gt;Digital Native: The bigger picture&lt;/a&gt; (Nishant Shah; Indian Express; July 1, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/telecom/blog/organizing-india-blogspot-shyam-ponappa-july-6-2018-problems-that-should-occupy-our-electioneers"&gt;The Problems That Should Occupy Our Electioneers&lt;/a&gt; (Shyam Ponappa; Business Standard; July 6, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/raw/indian-express-july-15-2018-nishant-shah-digital-native-the-citys-watching"&gt;Digital Native: How smart cities can make criminals out of denizens&lt;/a&gt; (Nishant Shah; Indian Express; July 15, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/livemint-july-24-2018-swaraj-barooah-and-gurshabad-grover-anti-trafficking-bill-may-lead-to-censorship"&gt;Anti-trafficking Bill may lead to censorship&lt;/a&gt; (Swaraj Barooah and Gurshabad Grover; Livemint; July 24, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/raw/digital-native-hashtag-along-with-me"&gt;Digital Native: Hashtag Along With Me&lt;/a&gt; (Nishant Shah; Indian Express; July 29, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/economic-times-july-30-2018-sunil-abraham-lining-up-data-on-srikrishna-privacy-draft-bill"&gt;Lining up the data on the Srikrishna Privacy Draft Bill&lt;/a&gt; (Sunil Abraham; Economic Times; July 30, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/business-standard-july-31-2018-sunil-abraham-spreading-unhappiness-equally-around"&gt;Spreading unhappiness equally around&lt;/a&gt; (Business Standard; July 31, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;CIS in the News&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/the-national-july-2-2018-samanth-subramanian-smartphone-rumours-spark-series-of-mob-killings-in-india"&gt;Smartphone rumours spark series of mob killings in India&lt;/a&gt; (Samanth Subramanian; The National; July 2, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/huffington-post-july-5-2018-government-gives-nod-to-bill-for-building-dna-databases-in-india-for-criminal-investigation-and-justice-delivery"&gt;Government Gives Nod To Bill For Building DNA Databases In India, For 'Criminal Investigation And Justice Delivery'&lt;/a&gt; (Huffington Post; July 5, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/the-times-of-india-july-6-2018-hope-for-such-swift-crackdowns-for-everyone"&gt;'Hope for such swift crackdowns for everyone'&lt;/a&gt; (Times of India; July 6, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/business-standard-july-9-2018-69-mob-attacks-on-child-lifting-rumours-since-jan-17-only-one-before-that"&gt;Child-lifting rumours caused 69 mob attacks, 33 deaths in last 18 months&lt;/a&gt; (Business Standard; July 9, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/death-by-social-media"&gt;Death by Social Media&lt;/a&gt; (Pretika Khanna, Abhiram Ghadyalpatil and Shaswati Das; Livemint; July 9, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/huffington-post-gopal-sathe-july-12-2018-indias-latest-data-leak-is-so-basic-that-peoples-aadhaar-number-bank-account-and-fathers-name-are-just-one-google-search-away"&gt;India's Latest Data Leak: People's Aadhaar Number And Bank Account Are Just One Google Search Away&lt;/a&gt; (Gopal Sathe; Huffington Post; July 12, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/bloomberg-quint-july-16-2018-people-should-have-right-to-their-data-not-companies-says-trai"&gt;People Should Have Right To Their Data, Not Companies, Says TRAI&lt;/a&gt; (Bloomberg Quint; July 16, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/huffington-post-gopal-sathe-july-16-2018-after-securing-net-neutrality-in-india-trai-goes-to-bat-for-data-privacy"&gt;After Securing Net Neutrality In India, TRAI Goes To Bat For Data Privacy&lt;/a&gt; (Gopal Sathe; Huffington Post; July 16, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/economic-times-july-18-2018-surabhi-agarwal-and-gulveen-aulakh-trai-recommendations-on-data-privacy-raises-eyebrows"&gt;TRAI recommendations on data privacy raises eyebrows&lt;/a&gt; (Surabhi Agarwal and Gulveen Aulakh; Economic Times; July 18, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/economic-times-megha-mandavia-july-19-2018-srikrishna-panel-upset-at-timing-of-trai-suggestions"&gt;Srikrishna panel upset at timing of Trai suggestions&lt;/a&gt; (Megha Mandavia; Economic Times; July 19, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/deccan-herald-july-20-2018-rajitha-menon-firms-find-wealth-in-your-data"&gt;Firms find wealth in your data&lt;/a&gt; (Rajitha Menon; Deccan Herald; July 20, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/economic-times-venkat-ananth-july-24-2018-whatsapp-races-against-time-to-fix-fake-news-mess-ahead-of-2019-general-elections"&gt;WhatsApp races against time to fix fake news mess ahead of 2019 general elections&lt;/a&gt; (Venkat Ananth; Economic Times; July 24, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/factor-daily-sunny-sen-and-jayadevan-pk-july-25-2018-the-crown-of-thorns-that-awaits-facebook-india-md-hire"&gt;The crown of thorns that awaits Facebook’s India MD hire&lt;/a&gt; (Sunny Sen and Jayadevan PK; Factory Daily; July 25, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/livemint-july-26-2018-mihir-dalal-and-anirban-sen-byte-by-byte-protecting-her-privacy"&gt;Bit by byte protecting her privacy&lt;/a&gt; (Mihir Dalal and Anirban Sen; Livemint; July 26, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/livemint-july-27-2018-komal-gupta-govt-asks-cbi-to-probe-cambridge-analytica-in-data-breach-case"&gt;Govt asks CBI to probe Cambridge Analytica in data breach case&lt;/a&gt; (Komal Gupta; Livemint; July 27, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/news/economic-times-july-28-2018-mugdha-variyar-and-pratik-bhakta-data-localisation-may-pinch-startups-payments-firms"&gt;Data localisation may pinch startups, payments firms&lt;/a&gt; (Mugdha Variyar and Pratik Bhakta; Economic Times; July 28, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;h2&gt;&lt;a href="http://cis-india.org/a2k"&gt;Access to Knowledge&lt;/a&gt;&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Our Access to Knowledge programme currently consists of two projects.  The Pervasive Technologies project, conducted under a grant from the  International Development Research Centre (IDRC), aims to conduct  research on the complex interplay between low-cost pervasive  technologies and intellectual property, in order to encourage the  proliferation and development of such technologies as a social good. The  Wikipedia project, which is under a grant from the Wikimedia  Foundation, is for the growth of Indic language communities and projects  by designing community collaborations and partnerships that recruit and  cultivate new editors and explore innovative approaches to building  projects.&lt;/p&gt;
&lt;h3&gt;Wikipedia&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entries&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/cb5cbfc95cbfcaaca1cbfcaf-ca4cb0cacca4cbf-ce8ce6ce7cee-cb0cbec82c9acbf-1"&gt;ವಿಕಿಪೀಡಿಯ ತರಬೇತಿ ೨೦೧೮ @ ರಾಂಚಿ&lt;/a&gt; (Vikas Hegde; July 4, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/how-to-write-differently-for-different-telugu-digital-platforms-awareness-session-to-indu-gnana-vedika"&gt;How to write differently for different Telugu digital platforms - awareness session to Indu Gnana Vedika&lt;/a&gt; (Pavan Santosh; July 19, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/c35c3ec1fc4dc38c3ec2ac4d-c38c3ec39c3fc24c4dc2f-c35c47c26c3fc15-c28c41c02c1ac3f-c35c3fc15c40c38c4bc30c4dc38c41c15c41"&gt;వాట్సాప్ సాహిత్య వేదిక నుంచి వికీసోర్సుకు&lt;/a&gt; (Pavan Santosh; July 31, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Events Organized&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/partnership-activity-in-annamayya-library-guntur"&gt;Partnership activity in Annamayya Library&lt;/a&gt; (Guntur; July 10, 2014).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/partnership-discussions-with-misimi-telugu-monthly-magazine"&gt;Partnership discussions with Misimi Telugu Monthly Magazine&lt;/a&gt; (July 24, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/tumakur%20university-workshop"&gt;Tumakur University Workshop&lt;/a&gt; (Tumkur; July 25, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water"&gt;Workshop of River activists for building Jal Bodh - Knowledge resource on Water&lt;/a&gt; (Pune; July 25, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/a2k/blogs/workshop-of-publishers-and-writers-on-unicode-open-source-and-wikimedia-projects"&gt;Workshop of Publishers and Writers on Unicode, Open Source and Wikimedia Projects&lt;/a&gt; (Pune; July 25, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h2&gt;&lt;a href="http://cis-india.org/internet-governance"&gt;Internet Governance&lt;/a&gt;&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;As part of its research on privacy and free speech, CIS is engaged with  two different projects. The first one (under a grant from Privacy  International and IDRC) is on surveillance and freedom of expression  (SAFEGUARDS). The second one (under a grant from MacArthur Foundation)  is on restrictions that the Indian government has placed on freedom of  expression online.&lt;/p&gt;
&lt;h3&gt;Privacy&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Submissions&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/cis-submitted-a-response-to-a-notice-of-enquiry-by-the-us-government-on-international-internet-policy-priorities"&gt;Response to a Notice of Enquiry by the US Government on International Internet Policy Priorities&lt;/a&gt; (Swagam Dasgupta; July 18, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018"&gt;The Centre for Internet and Society’s Comments and Recommendations to the: Indian Privacy Code, 2018&lt;/a&gt; (Shweta Mohandas, Elonnai Hickok, Amber Sinha and Shruti Trikanand; July 20, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;b&gt;Blog Entry&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-ai-task-force-report-the-first-steps-towards-indias-ai-framework"&gt;The AI Task Force Report - The first steps towards India’s AI framework&lt;/a&gt; (Elonnai Hickok, Shweta Mohandas and Swaraj Paul Barooah; June 27, 2018). The blog post was edited by Swagam Dasgupta.&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Participation in Events&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/news/ietf-102-montreal"&gt;IETF 102 Montreal&lt;/a&gt; (Organized by Internet Engineering Task Force; Fairmont Queen Elizabeth Montreal in Canada; July 14 - 20, 2018). Gurshabad Grover presented a review of the human rights considerations in the drafts of the Software Update for IoT Devices (SUIT) Working Group in the meeting of the HRPC research group. &lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/news/ethical-data-design-practices-in-the-ai-artificial-intelligence-age"&gt;Ethical Data Design Practices in the AI (Artificial Intelligence) Age&lt;/a&gt; (Organized by Startup Grind, Bangalore at NUMA Bangalore; July 28, 2018). Shweta Mohandas was a panelist.&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h3&gt;Cyberspace and Cyber Security&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Analysis&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/the-potential-for-the-normative-regulation-of-cyberspace-implications-for-india"&gt;The Potential for the Normative Regulation of Cyberspace: Implications for India&lt;/a&gt; (Arindrajit Basu; July 30, 2018). The report was edited by Elonnai Hickok, Sunil Abraham and Udbhav Tiwari with research assistance from Tejas Bharadwaj.&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;Blog Entry&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/cis-contributes-to-the-research-and-advisory-group-of-the-global-commission-on-the-stability-of-cyberspace-gcsc"&gt;CIS contributes to the Research and Advisory Group of the Global Commission on the Stability of Cyberspace&lt;/a&gt; (GCSC) (Arindrajit Basu; July 5, 2018). &lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;p&gt;&lt;b&gt;Participation in Event&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/news/ieee-sa-indita-conference-2018"&gt;IEEE-SA InDITA Conference 2018&lt;/a&gt; (Organized by IEEE Standards Association; IIIT-Bangalore; July 10 - 11, 2018). Gurshabad Grover gave a brief presentation on how we could apply or reject 'Trust Through Technology' principles in the design of public biometric authentication. &lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;Free Speech &amp;amp; Expression&lt;/h3&gt;
&lt;p&gt;&lt;b&gt;Blog Entries&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/icann-diversity-analysis"&gt;ICANN Diversity Analysis&lt;/a&gt; (Paul Kurian and Akriti Bopanna; July 16, 2018).&lt;/li&gt;
&lt;li&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/didp-31-diversity-of-employees-at-icann"&gt;DIDP #31 Diversity of employees at ICANN&lt;/a&gt; (Akash Sriram; July 19, 2018).&lt;/li&gt;
&lt;/ul&gt;
&lt;div&gt;&lt;b&gt;&lt;br /&gt;Participation in Event&lt;/b&gt;&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/26th-amic-annual-conference-2013-india-2018"&gt;26th AMIC Annual Conference – India 2018&lt;/a&gt; (Organized by Manipal Academy of Higher Education; Fortune Inn Valley View, Manipal, Karnataka; June 7 - 9, 2018). Swaraj Paul Barooah was a speaker. &lt;span&gt;An article announcing the event by Kevin Mendonsa was published in the &lt;/span&gt;&lt;a class="external-link" href="https://timesofindia.indiatimes.com/home/education/news/mahe-to-host-26th-annual-conference-of-amic/articleshow/64468351.cms"&gt;Times of India&lt;/a&gt;&lt;span&gt; on June 5, 2018.&lt;/span&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;div&gt;&lt;/div&gt;
&lt;ul&gt;
&lt;/ul&gt;
&lt;h2&gt;&lt;span style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/telecom"&gt;Telecom&lt;/a&gt;&lt;/span&gt;&lt;/h2&gt;
&lt;p&gt;&lt;span style="text-align: justify; "&gt;CIS is involved in promoting access and accessibility to telecommunications services and resources, and has provided inputs to ongoing policy discussions and consultation papers published by TRAI. It has prepared reports on unlicensed spectrum and accessibility of mobile phones for persons with disabilities and also works with the USOF to include funding projects for persons with disabilities in its mandate:&lt;/span&gt;&lt;/p&gt;
&lt;p&gt;&lt;b&gt;&lt;span style="text-align: justify; "&gt;Newspaper Column&lt;/span&gt;&lt;/b&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;span style="text-align: justify; "&gt;&lt;a class="external-link" href="https://cis-india.org/telecom/blog/organizing-india-blogspot-shyam-ponappa-july-6-2018-problems-that-should-occupy-our-electioneers"&gt;The Problems That Should Occupy Our Electioneers&lt;/a&gt; (Shyam Ponappa; Business Standard; July 5, 2018 and Organizing India Blogspot; July 6, 2018).&lt;/span&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;hr /&gt;
&lt;h2&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;span style="text-align: justify; "&gt;&lt;a href="http://cis-india.org/"&gt;About CIS&lt;/a&gt;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;The Centre for Internet and  Society (CIS) is a non-profit organisation that undertakes  interdisciplinary research on internet and digital technologies from  policy and academic perspectives. The areas of focus include digital  accessibility for persons with disabilities, access to knowledge,  intellectual property rights, openness (including open data, free and  open source software, open standards, open access, open educational  resources, and open video), internet governance, telecommunication  reform, digital privacy, and cyber-security. The academic research at  CIS seeks to understand the reconfigurations of social and cultural  processes and structures as mediated through the internet and digital  media technologies.&lt;/p&gt;
&lt;p&gt;► Follow us elsewhere&lt;/p&gt;
&lt;div&gt;
&lt;ul&gt;
&lt;li&gt;Twitter:&lt;a href="http://twitter.com/cis_india"&gt; http://twitter.com/cis_india&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Twitter - Access to Knowledge: &lt;a href="https://twitter.com/CISA2K"&gt;https://twitter.com/CISA2K&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Twitter - Information Policy: &lt;a href="https://twitter.com/CIS_InfoPolicy"&gt;https://twitter.com/CIS_InfoPolicy&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Facebook - Access to Knowledge:&lt;a href="https://www.facebook.com/cisa2k"&gt; https://www.facebook.com/cisa2k&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;E-Mail - Access to Knowledge: &lt;a&gt;a2k@cis-india.org&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;E-Mail - Researchers at Work: &lt;a&gt;raw@cis-india.org&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;List - Researchers at Work: &lt;a href="https://lists.ghserv.net/mailman/listinfo/researchers"&gt;https://lists.ghserv.net/mailman/listinfo/researchers&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
&lt;p&gt;► Support Us&lt;/p&gt;
&lt;div&gt;Please help us defend consumer and citizen rights on the Internet!  Write a cheque in favour of 'The Centre for Internet and Society' and  mail it to us at No. 194, 2nd 'C' Cross, Domlur, 2nd Stage, Bengaluru - 560071.&lt;/div&gt;
&lt;p&gt;► Request for Collaboration&lt;/p&gt;
&lt;div&gt;
&lt;p style="text-align: justify; "&gt;We invite researchers, practitioners, artists, and theoreticians,  both organisationally and as individuals, to engage with us on topics  related internet and society, and improve our collective understanding  of this field. To discuss such possibilities, please write to Sunil  Abraham, Executive Director, at sunil@cis-india.org (for policy research), or Sumandro Chattapadhyay, Research Director, at sumandro@cis-india.org (for  academic research), with an indication of the form and the content of  the collaboration you might be interested in. To discuss collaborations  on Indic language Wikipedia projects, write to Tanveer Hasan, Programme  Officer, at &lt;a&gt;tanveer@cis-india.org&lt;/a&gt;.&lt;/p&gt;
&lt;div style="text-align: justify; "&gt;&lt;i&gt;CIS is grateful to its primary donor the Kusuma Trust founded  by Anurag Dikshit and Soma Pujari, philanthropists of Indian origin for  its core funding and support for most of its projects. CIS is also  grateful to its other donors, Wikimedia Foundation, Ford Foundation,  Privacy International, UK, Hans Foundation, MacArthur Foundation, and  IDRC for funding its various projects&lt;/i&gt;.&lt;/div&gt;
&lt;/div&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/about/newsletters/july-2018-newsletter'&gt;https://cis-india.org/about/newsletters/july-2018-newsletter&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>praskrishna</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Telecom</dc:subject>
    
    
        <dc:subject>Researchers at Work</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Access to Knowledge</dc:subject>
    

   <dc:date>2018-08-11T02:50:52Z</dc:date>
   <dc:type>Page</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water">
    <title>Workshop of River activists for building Jal Bodh - Knowledge resource on Water</title>
    <link>https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water</link>
    <description>
        &lt;b&gt;To build knowledge resource on rivers in Pune district, CIS-A2K team organized a workshop in Pune on July 25, 2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;After the &lt;a href="https://meta.wikimedia.org/wiki/Exploring_Wikimedia_platforms_in_Dialogue_on_the_Urban_Rivers_of_Maharashtra" title="Exploring Wikimedia platforms in Dialogue on the Urban Rivers of Maharashtra"&gt;River Dialogue&lt;/a&gt;,  Jeevit nadi organisation has taken initiative to train their team of  activists in open knowledge and Wikimedia Projects. Activists and  researchers from &lt;a class="text external" href="http://www.jeevitnadi.org/" rel="nofollow"&gt;Jeevit Nadi&lt;/a&gt;, &lt;a class="text external" href="http://ecouniv.in/" rel="nofollow"&gt;EcoUniv&lt;/a&gt;, Jal Biradari, &lt;a class="text external" href="https://www.ecological-society.org/" rel="nofollow"&gt;Ecological Society&lt;/a&gt;, &lt;a class="text external" href="http://sagarmitra.org/" rel="nofollow"&gt;Sagarmitra&lt;/a&gt; organisations participated in the workshop. The groups presented their  work, the database and the resources with them. These include  photographs, videos, training material and data collected on site.  Extensive discussion was made for evolving methodology to integrate in  Wikimedia Projects. This was the first iteration in the series of  workshops.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The second iteration of building content on rivers in Pune  district was organised on 25th July by Jeevit Nadi organisation. In this  4 hour session, 5 participants worked on category tree and structure of  article on river. The editing skills for this were imparted. The images  and videos available in the repository were analysed for uploading. The  categorisation of media was also planned. The issues like maps of  rivers and heritage places on the banks were also discussed. This team  will prepare the database and organise the media files available for  uploading in respective categories. The next iteration is planned in  September.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Read the full details on &lt;a class="external-link" href="https://meta.wikimedia.org/wiki/Workshop_of_River_activists_for_building_Jal_Bodh_-_Knowledge_resource_on_Water"&gt;Wikimedia Blog&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water'&gt;https://cis-india.org/a2k/blogs/workshop-of-river-activists-for-building-jal-bodh-knowledge-resource-on-water&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Wikimedia</dc:subject>
    
    
        <dc:subject>CIS-A2K</dc:subject>
    
    
        <dc:subject>Wikipedia</dc:subject>
    
    
        <dc:subject>Access to Knowledge</dc:subject>
    

   <dc:date>2018-08-11T01:45:18Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018">
    <title>The Centre for Internet and Society’s Comments and Recommendations to the: Indian Privacy Code, 2018 </title>
    <link>https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018</link>
    <description>
        &lt;b&gt;The debate surrounding privacy has in recent times gained momentum due to the Aadhaar judgement and the growing concerns around the use of personal data by corporations and governments.&lt;/b&gt;
        &lt;p&gt;Click to download the &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/indian-privacy-code"&gt;file here&lt;/a&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;As India moves towards greater digitization, and technology becomes even more pervasive, there is a need to ensure the privacy of the individual as well as hold the private and public sector accountable for the use of personal data. Towards enabling public discourse and furthering the development a privacy framework for India, a group of lawyers and policy analysts backed by the Internet Freedom Foundation (IFF) have put together a draft a citizen's bill encompassing a citizen centric privacy code that is based on seven guiding principles.&lt;a href="#_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; This draft builds on the Citizens Privacy Bill, 2013 that had been drafted by CIS on the basis of a series of roundtables conducted in India.&lt;a href="#_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Privacy is one of the key areas of research at CIS and we welcome this initiative and hope that our comments make the Act a stronger embodiment of the right to privacy.&lt;/p&gt;
&lt;h1 style="text-align: justify; "&gt;Section by Section Recommendations&lt;/h1&gt;
&lt;h2 style="text-align: justify; "&gt;Preamble&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; The Preamble specifies that the need for privacy has increased in the digital age, with the emergence of big data analytics.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; It could instead be worded as ‘with the emergence of technologies such as big data analytics’, so as to recognize the impact of multiple technologies and processes including big data analytics.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; The Preamble states that it is necessary for good governance that all interceptions of communication and surveillance be conducted in a systematic and transparent manner subservient to the rule of law.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Recommendation: The word ‘systematic’ is out of place, and can be interpreted incorrectly. It could instead be replaced with words such as ‘necessary’, ‘proportionate’, ‘specific’, and ‘narrow’, which would be more appropriate in this context.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Chapter 1&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;Preliminary&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 2: &lt;/b&gt;This Section defines the terms used in the Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Some of the terms are incomplete and a few of the terms used in the Act have not been included in the list of definitions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendations:&lt;/b&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;The term “effective consent” needs to be defined. The term is first used in the Proviso to Section 7(2), which states “Provided that effective consent can only be said to have been obtained where...:” It is crucial that the Act defines effective consent especially when it is with respect to sensitive data.&lt;/li&gt;
&lt;li&gt;The term “open data” needs to be defined. The term is first used in Section 5 that states the exemptions to the right to privacy. Subsection 1 clause ii states as follows “the collection, storage, processing or dissemination by a natural person of personal data for a strictly non-commercial purposes which may be classified as open data by the Privacy Commission”. Hence the term open data needs to be defined in order to ensure that there is no ambiguity in terms of what open data means.&lt;/li&gt;
&lt;li&gt;The Act does not define “erasure”, although the term erasure does come under the definition of destroy (Section 2(1)(p)). There are some provisions that use the word erasure, hence if erasure and destruction mean different acts then the term erasure needs to be defined, otherwise in order to maintain uniformity the sections where erasure is used could be substituted with the term “destroy” as defined under this Act.&lt;/li&gt;
&lt;li&gt;The definition of “sensitive personal data” does not include location data and identification numbers. The definition of sensitive data must include location data as the Act also deals in depth with surveillance. With respect to identification numbers, the Act needs to consider identification numbers (eg. the Aadhaar number, PAN number etc.) as sensitive information as this number is linked to a person's identity and can reveal sensitive personal data such as name, age, location, biometrics etc. Example can be taken from Section 4(1) of the GDPR&lt;a href="#_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; which identifies location data as well as identification numbers as sensitive personal data along with other identifiers such as biometric data, gender, race etc.&lt;/li&gt;
&lt;li&gt;The Act defines consent as the “unambiguous indication of a data subject’s agreement” however, the definition does not indicate that there needs to be an informed consent. Hence the revised definition could read as follows “the informed and unambiguous indication of a data subject’s agreement”. It is also unclear how this definition of consent relates to ‘effective consent’. This relationship needs to be clarified.&lt;/li&gt;
&lt;li&gt;The Act defines ‘data controller’ in Section 2(1)(l) as “ any person including appropriate government..”. In order to remove any ambiguity over the definition of the term person, the definition could specify that the term person means any natural or legal person.&lt;/li&gt;
&lt;li&gt;The Act defines ‘data processor’ in Section 2(1)(m) as “means any person including appropriate government”. In order to remove any ambiguity over the definition of the term ‘any person’, the definition could specify that the term person means any natural or legal person. &lt;/li&gt;
&lt;/ul&gt;
&lt;h2 style="text-align: justify; "&gt;CHAPTER II&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;Right to Privacy&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 5: &lt;/b&gt;This section provides exemption to the rights to privacy&lt;b&gt;. &lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment: &lt;/b&gt;Section 5(1)(ii) states that the collection, storage, processing or dissemination by a natural person of personal data for a strictly non-commercial purposes are exempted from the provisions of the right to privacy. This clause also states that this data may be classified as open data by the Privacy Commission. This section hence provides individuals the immunity from collection, storage, processing and dissemination of data of another person. However this provision fails to state what specific activities qualify as non commercial use.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;This provision could potentially be strengthened by specifying that the use must be in the public interest. The other issue with this subsection is that it fails to define open data. If open data was to be examined using its common definition i.e “data that can be freely used, modified, and shared by anyone for any purpose”&lt;a href="#_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; then this section becomes highly problematic. As a simple interpretation would mean that any personal data that is collected, stored, processed or disseminated by a natural person can possibly become available to anyone. Beyond this, India has an existing framework governing open data. Ideally the privacy commissioner could work closely with government departments to ensure that open data practices in India are in compliance with the privacy law.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;CHAPTER III&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;Protection of Personal Data&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;PART A&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Notice by data controller &lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 6: &lt;/b&gt;This section specifies the obligations to be followed by data controllers in their communication, to maintain transparency and lays down provisions that all communications by Data Controllers need to be complied with.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; There seems to be a error in the &lt;i&gt;Proviso &lt;/i&gt;to this section. The proviso states “Provided that all communications by the Data Controllers including but not limited to the rights of Data Subjects under this part &lt;b&gt;shall may be &lt;/b&gt;refused when the Data Controller is, unable to identify or has a well founded basis for reasonable doubts as to the identity of the Data Subject or are manifestly unfounded, excessive and repetitive, with respect to the information sought by the Data Subject ”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;The proviso could read as follows “The proviso states “Provided that all communications by the Data Controllers including but not limited to the rights of Data Subjects under this part &lt;b&gt;&lt;i&gt;may&lt;/i&gt;&lt;/b&gt; be refused when the Data Controller is…”. We suggest the use of the ‘may’ as this makes the provision less limiting to the rights of the data controller.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Additionally, it is not completely clear what ‘included but not limited to...’ would entail. This could be clarified further.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;PART B&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;CONSENT OF DATA SUBJECTS&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 10: &lt;/b&gt;This section talks about the collection of personal data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 10(3) lays down the information that a person must provide before collecting the personal data of an individual.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 10(3)(xi) states as follows “the time and manner in which it will be destroyed, or the criteria used to Personal data collected in pursuance of a grant of consent by the data subject to whom it pertains shall, if that consent is subsequently withdrawn for any reason, be destroyed forthwith: determine that time period;”. There seems to be a problem with the sentence construction and the rather complex sentence is difficult to understand.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; This section could be reworked in such as way that two conditions are clear, one - the time and manner in which the data will be destroyed and two the status of the data once consent is withdrawn.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 10(3)(xiii) states that the identity and contact details of the data controller and data processor must be provided. However it fails to state that the data controller should provide more details with regard to the process for grievance redressal. It does not provide guidance on what type of information needs to go into this notice and the process of redressal. This could lead to very broad disclosures about the existence of redress mechanisms without providing individuals an effective avenue to pursue.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;As part of the requirement for providing the procedure for redress, data controllers could specifically be required to provide the details of the Privacy Officers, privacy commissioner, as well as provide more information on the redressal mechanisms and the process necessary to follow.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 11:&lt;/b&gt;This section lays out the provisions where collection of personal data without prior consent is possible.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 11 states “Personal data may be collected or received from a third party by a Data Controller the prior consent of the data subject only if it is:..”. However as the title of the section suggests the sentence could indicate the situations where it is permissible to collect personal data without prior consent from the data subject”. Hence the word “without” is missing from the sentence. Additionally the sentence could state that the personal data may be collected or received directly from an individual or from a third party as it is possible to directly collect personal data from an individual without consent.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt;The sentence could read as “Personal data may be collected or received from an &lt;b&gt;individual or a third party &lt;/b&gt;by a Data Controller &lt;b&gt;&lt;i&gt;without&lt;/i&gt;&lt;/b&gt; the prior consent of the data subject only if it is:..”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 11(1)(i) states that the collection of personal data without prior consent when it is “necessary for the provision of an emergency medical service or essential services”. However it does not specify the kind or severity of the medical emergency.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;In addition to medical emergency another exception could be made for imminent threats to life.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 12: &lt;/b&gt;This section details the Special provisions in respect of data collected prior to the commencement of this Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; This section states that all data collected, processed and stored by data controllers and data processors prior to the date on which this Act comes into force shall be destroyed within a period of two years from the date on which this Act comes into force. Unless consent is obtained afresh within two years or that the personal data has been anonymised in such a manner to make re-identification of the data subject absolutely impossible. However this process can be highly difficult and impractical in terms of it being time consuming, expensive particularly, in cases of analog collections of data. This is especially problematic in cases where the controller cannot seek consent of the data subject due to change in address or inavailability or death. This will also be problematic in cases of digitized government records.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; We suggest three ways in which the issue of data collected prior to the Act can be handled. One way is to make a distinction on the data based on whether the data controller has specified the purpose of the collection before collecting the data. If the purpose was not defined then the data can be deleted or anonymised. Hence there is no need to collect the data afresh for all the cases. The purpose of the data can also be intimated to the data subject at a later stage and the data subject can choose if they would like the controller to store or process the data. The second way is by seeking consent afresh only for the sensitive data. Lastly, the data controller could be permitted to retain records of data, but must necessarily obtain fresh consent before using them. By not having a blanket provision of retrospective data deletion the Act can address situations where deletion is complicated or might have a potential negative impact by allowing storage, deletion, or anonymisation of data based on its purpose and kind.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 12(1)(i) of the Act states that the data will not be destroyed provided that &lt;b&gt;effective consent&lt;/b&gt; is obtained afresh within two years. However as stated earlier the Act does not define effective consent.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; The term &lt;b&gt;effective consent &lt;/b&gt;needs to be defined in order to bring clarity to this provision.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;PART C&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;FURTHER LIMITATIONS ON DATA CONTROLLERS&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 16: &lt;/b&gt;This section deals with the security of personal data and duty of confidentiality.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 16(2) states “Any person who collects, receives, stores, processes or otherwise handles any personal data shall be subject to a duty of confidentiality and secrecy in respect of it.” Similarly Section 16(3) states “data controllers and data processors shall be subject to a duty of confidentiality and secrecy in respect of personal data in their possession or control.” However apart from the duty of confidentiality and secrecy the data collectors and processors could also have a duty to maintain the security of the data. Though it is important for confidentiality and secrecy to be maintained, ensuring security requires adequate and effective technical controls to be in place.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; This section could also emphasise on the duty of the data controllers to ensure the security of the data. The breach notification could include details about data that is impacted by a breach or attach as well as the technical details of the infrastructure compromised.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 17:&lt;/b&gt; This section details the conditions for the transfer of personal data outside the territory of India.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 17 allows a transfer of personal data outside the territory of India in 3 situations- If the Central Government issues a notification deciding that the country/international organization in question can ensure an adequate level of protection, compatible with privacy principles contained in this Act; if the transfer is pursuant to an agreement which binds the recipient of the data to similar or stronger conditions in relation to handling the data; or if there are appropriate legal instruments and safeguards in place, to the satisfaction of the data controller. However, there is no clarification for what would constitute ‘adequate’ or ‘appropriate’ protection, and it does not account for situations in which the Government has not yet notified a country/organisation as ensuring adequate protection. In comparison, the GDPR, in Chapter V&lt;a href="#_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, contains factors that must be considered when determining adequacy of protection, including relevant legislation and data protection rules, the existence of independent supervisory authorities, and international commitments or obligations of the country/organization. Additionally, the GDPR allows data transfer even in the absence of the determination of such protection in certain instances, including the use of standard data protection clauses, that have been adopted or approved by the Commission; legally binding instruments between public authorities; approved code of conduct, etc. Additionally, it allows derogations from these measures in certain situations: when the data subject expressly agrees, despite being informed of the risks; or if the transfer is necessary for conclusion of contract between data subject and controller, or controller and third party in the interest of data subject; or if the transfer is necessary for reasons of public interest, etc. 
No such circumstances are accounted for in Section 17.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;Additionally, data controllers and processors could be provided with a period to allow them to align their policies towards the new legislation. Making these provisions operational as soon as the Act commences might render controllers or processors involuntarily in breach of the provisions of the Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 19: &lt;/b&gt;This section&lt;b&gt; &lt;/b&gt;states the special provisions for sensitive personal data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 19(2) states that in addition to the requirements set out under sub-clause (1), the Privacy Commission shall set out additional protections in respect of: i. sensitive personal data relating to data subjects who are minors; ii. biometric and deoxyribonucleic acid data; and iii. financial and credit data. This however creates additional categories of sensitive data apart from the ones that have already been created.&lt;a href="#_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; These additional categories can result in confusion and errors.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;Sensitive data must not be further categorised as this can lead to confusion and errors. Hence all sensitive data could be subject to the same level of protection.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 20:&lt;/b&gt; This section states the special provisions for data impact assessment.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; This section states that all data impact assessment reports will be submitted periodically to the State Privacy commission. This section does not make provisions for instances of circumstances in which such records may be made public. Additionally the data impact assessment could also include a human rights impact assessment.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; The section could also have provisions for making the records of the impact assessment or relevant parts of the assessment public. This will ensure that the data controllers / processors are subjected to a standard of accountability and transparency. Additionally as privacy is linked to human rights the data impact assessment could also include a human rights impact assessment. The Act could further clarify the process for submission to State Privacy Commissions and potential access by the Central Privacy Commission to provide clarity in process.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Section 20 requires controllers who use new technology to assess the risks to the data protection rights that occur from processing. ‘New technology’ is defined to include pre-existing technology that is used anew. Additionally, the reports are required to be sent to the State Privacy Commission periodically. However, there is no clarification on the situations in which such an assessment becomes necessary, or whether all technology must undergo such an assessment before their use. Additionally, the differentiation between different data processing activities based on whether the data processing is incidental or a part of the functioning needs to be clarified. This differentiation is necessary as there are some data processors and controllers who need the data to function; for instance an ecommerce site would require your name and address to deliver the goods, although these sites do not process the data to make decisions. This can be compared to a credit rating agency that is using the data to make decisions as to who will be given a loan based on their creditworthiness. An example can be taken from the GDPR, which in Article 35, specifies instances in which a data impact assessment is necessary: where a new technology, that is likely to result in a high risk to the rights of persons, is used; where personal aspects related to natural persons are processed automatically, including profiling; where processing of special categories of data (including data revealing ethnic/racial origin, sexual orientation etc), biometric/genetic data; where data relating to criminal convictions is processed; and with data concerning the monitoring of publicly accessible areas. Additionally, there is no requirement to publish the report, or send it to the supervising authority, but the controller is required to review the processor’s operations to ensure its compliance with the assessment report.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; The reports could be sent to a central authority, which according to this Act is the Privacy Commission, along with the State Privacy Commission. Additionally there needs to be a differentiation between the incidental and express use of data. The data processors must be given at least a period of one year after the commencement of the Act to present their impact assessment report. This period is required for the processors to align themselves with the provisions of the Act as well as conduct capacity building initiatives.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;PART D&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;RIGHTS OF A DATA SUBJECT&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 21: &lt;/b&gt;This section explains the right of the data subject with regard to accessing her data. It states that the data subject has the right to obtain from the data controller information as to whether any personal data concerning her is collected or processed. The data controller also has to not only provide access to such information but also the personal data that has been collected or processed.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; This section does not provide the data subject the right to seek information about security breaches.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;This section could state that the data subject has the right to seek information about any security breaches that might have compromised her data (through theft, loss, leaks etc.). This could also include steps taken by the data controller to address the immediate breach as well as steps to minimise the occurrence of such breaches in the future.&lt;a href="#_ftn7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;CHAPTER IV&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;INTERCEPTION AND SURVEILLANCE&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 28: &lt;/b&gt;This section lists out the special provisions for competent organizations.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 28(1) states “all provisions of Chapter III shall apply to personal data collected, processed, stored, transferred or disclosed by competent organizations unless when done as per the provisions under this chapter”. This does not make provisions for other categories of data such as sensitive data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; This section needs to include not just personal data but also sensitive data, in order to ensure that all types of data are protected under this Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 30:&lt;/b&gt; This section states the provisions for prior authorisation by the appropriate Surveillance and Interception Review Tribunal.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 30(5) states “any interception involving the infringement of the privacy of individuals who are not the subject of the intended interception, or where communications relate to &lt;b&gt;medical, journalistic, parliamentary or legally privileged material&lt;/b&gt; may be involved, shall satisfy additional conditions including the provision of specific prior justification in writing to the Office for Surveillance Reform of the Privacy Commission as to the necessity for the interception and the safeguards providing for minimizing the material intercepted to the greatest extent possible and the destruction of all such material that is not strictly necessary to the purpose of the interception.” This section needs to state why these categories of communication are more sensitive than others. Additionally, interceptions typically target people and not topics of communication - thus medical may be part of a conversation between two construction workers and a doctor will communicate about finances.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; The section could instead of singling out “medical, journalistic, parliamentary or legally privileged material” state that “any interception involving the infringement of the privacy of individuals who are not the subject of the intended interception may be involved, shall satisfy additional conditions including the provision of specific prior justification in writing to the Office for Surveillance Reform of the Privacy Commission”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 37&lt;/b&gt;: This section details the bar against surveillance.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment: &lt;/b&gt;Section 37(1) states that “no person shall order or carry out, or cause or assist the ordering or carrying out of, any surveillance of another person”. The section also prohibits indiscriminate monitoring, or mass surveillance, unless it is necessary and proportionate to the stated purpose. However, it is unclear whether this prohibits surveillance by a resident of their own residential property, which is allowed in Section 5, as the same could also fall within ‘indiscriminate monitoring/mass surveillance’. For instance, in the case of a camera installed in a residential property, which is outward facing, and therefore captures footage of the road/public space.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation:&lt;/b&gt; The Act needs to bring more clarity with regard to surveillance especially with respect to CCTV cameras that are installed in private places, but record public spaces such as public roads. The Act could have provisions that clearly define the use of CCTV cameras in order to ensure that cameras installed in private spaces are not used for carrying out mass surveillance. Further, the Act could address the use of emerging techniques and technology such as facial recognition technologies, that often rely on publicly available data.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;CHAPTER V&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;THE PRIVACY COMMISSION&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Section 53:&lt;/b&gt; This section details the powers and functions of the Privacy Commission.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; Section 53(2)(xiv) states that the Privacy Commission shall publish periodic reports “providing description of performance, findings, conclusions or recommendations of any or all of the functions assigned to the Privacy Commission”. However this Section does not make provisions for such reporting to happen annually and to make them publicly available, as well as contain details including financial aspects of matters contained within the Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Recommendation: &lt;/b&gt;The functions could include a duty to disclose the information regarding the functioning and financial aspects of matters contained within the Act. Categories that could be included in such reports include: the number of data controllers, number of data processors, number of breaches detected and mitigated etc.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;CHAPTER IX&lt;/h2&gt;
&lt;h2 style="text-align: justify; "&gt;OFFENCES AND PENALTIES&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt; Sections 73 to 80:&lt;/b&gt; These sections lay out the different punishments for controlling and processing data in contravention to the provisions of this Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;Comment:&lt;/b&gt; These sections, while laying out different punishments for controlling and processing data in contravention to the provisions of this Act, mete out a fine extending up to Rs. 10 crore. This is problematic as it does not base these penalties on the finer aspects of proportionality, such as offences that are not as serious as the others.&lt;br /&gt; &lt;br /&gt; &lt;b&gt;Recommendation:&lt;/b&gt; There could be a graded approach to the penalties based on the degree of severity of the offence. This could be in the form of name and shame, warnings and penalties that can be graded based on the degree of the offence. &lt;br /&gt; ----------------------------------------------------------------------&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Additional thoughts: As India moves to a digital future there is a need for laws to be in place to ensure that individuals' rights are not violated. By riding on the push to digitization, and emerging technologies such as AI, a strong all encompassing privacy legislation can allow India to leapfrog and use these emerging technologies for the benefit of the citizens without violating their privacy. A robust legislation can also ensure a level playing field for data driven enterprises within a framework of openness, fairness, accountability and transparency.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; These seven principles include: Right to Access, Right to Rectification, Right to Erasure and Destruction of Personal Data, Right to Restriction of Processing, Right to Object, Right to Portability of Personal Data, Right to Seek Exemption from Automated Decision-Making.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;The Privacy (Protection) Bill 2013: A Citizen’s Draft, Bhairav Acharya, Centre for Internet &amp;amp; Society, https://cis-india.org/internet-governance/blog/privacy-protection-bill-2013-citizens-draft&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;General Data Protection Regulation, available at https://gdpr-info.eu/art-4-gdpr/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Antonio Vetro, Open Data Quality Measurement Framework: Definition and Application to Open Government Data, available at https://www.sciencedirect.com/science/article/pii/S0740624X16300132&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; General Data Protection Regulation, available at https://gdpr-info.eu/chapter-5/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Sensitive personal data under Section 2(bb) includes, biometric data; deoxyribonucleic acid data;&lt;br /&gt; sexual preferences and practices;medical history and health information;political affiliation;&lt;br /&gt; membership of a political, cultural, social organisations including but not limited to a trade union as defined under Section 2(h) of the Trade Union Act, 1926;ethnicity, religion, race or caste; and&lt;br /&gt; financial and credit information, including financial history and transactions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Submission to the Committee of Experts on a Data Protection Framework for India, Amber Sinha, Centre for Internet &amp;amp; Society, available at https://cis-india.org/internet-governance/files/data-protection-submission&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018'&gt;https://cis-india.org/internet-governance/blog/the-centre-for-internet-and-society2019s-comments-and-recommendations-to-the-indian-privacy-code-2018&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Shweta Mohandas, Elonnai Hickok, Amber Sinha and Shruti Trikanand</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-07-20T13:55:46Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>




</rdf:RDF>
